{"seq_id": "3153601065", "text": "import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"pymyastuce\",\n version=\"0.0.3\",\n author=\"Paul Rohja LESELLIER\",\n author_email=\"rohja@rohja.com\",\n description=\"A small package to fetch next bus/metro/tram at a station of the MyAstuce network in Rouen, France.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/Rohja/pymyastuce\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/Rohja/pymyastuce/issues\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n \"Operating System :: OS Independent\",\n ],\n package_dir={\"\": \"src\"},\n packages=setuptools.find_packages(where=\"src\"),\n python_requires=\">=3.6\",\n)", "repo_name": "Rohja/pymyastuce", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 895, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "setuptools.setup", "line_number": 6, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 24, "usage_type": "call"}]} {"seq_id": "4417809755", "text": "import numpy as np\nfrom io import StringIO\n\ninput_string = '''\n25 2 50 1 500 127900\n39 3 10 1 1000 222100\n13 2 13 1 1000 143750\n82 5 20 2 120 268000\n130 6 10 2 600 460700\n115 6 10 1 550 407000\n'''\n\nnp.set_printoptions(precision=1) # this just changes the output settings for easier reading\n \ndef fit_model(input_file):\n # Please write your code inside this function\n data= np.genfromtxt(input_file, skip_header=1)\n # read the data in and fit it. 
the values below are placeholder values\n c = np.asarray([]) # coefficients of the linear regression\n x = np.asarray([]) # input data to the linear regression\n y = np.asarray([])\n\n i=len(data)-1\n while i>=0:\n last =data[i][-1]\n y = np.insert(y,0,last,axis=0)\n i-=1\n\n x=data[:,:-1]\n c=np.linalg.lstsq(x,y)[0]\n print(c)\n print(x @ c)\n\n# simulate reading a file\ninput_file = StringIO(input_string)\nfit_model(input_file)\n", "repo_name": "piilolav/Building-AI---Uni-Helsinki", "sub_path": "Exercises/Chapter 3 - Machine Learning/ex13-Predictions With More Data.py", "file_name": "ex13-Predictions With More Data.py", "file_ext": "py", "file_size_in_byte": 924, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "numpy.set_printoptions", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.linalg.lstsq", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 30, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 35, "usage_type": "call"}]} {"seq_id": "42449059260", "text": "import asyncio\n\nfrom hiss.handler import gntp, snp, xbmc\n\nSCHEME_HANDLERS = {\n 'gntp': gntp.GNTPHandler,\n 'snp': snp.SNPHandler,\n 'xbmc': xbmc.XBMCHandler,\n}\n\nclass Nub(object):\n \"\"\"If a message arrives from any of the registered targets then\n send it on to the other targets.\n \n Notifications can be filtered per target.\n \"\"\"\n\n def __init__(self, targets=[]):\n self._targets = []\n self._handlers = {}\n \n for target in targets:\n self.add_target(target)\n \n @asyncio.coroutine\n def async_handler(self, target):\n target.handler.subscribe()\n \n def go(self):\n \"\"\"Connect to all targets\n \"\"\"\n asyncio.Task\n \n def add_target(self, target):\n if target in self._targets:\n return\n \n scheme = target.scheme\n if scheme not in self._handlers:\n handler = SCHEME_HANDLERS[scheme]()\n self._handlers[scheme] = handler\n \n target.handler = self._handlers[scheme]\n self._targets.append(target)\n ", "repo_name": "sffjunkie/hiss", "sub_path": "src/working/nub.py", "file_name": "nub.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "hiss.handler.gntp.GNTPHandler", "line_number": 6, "usage_type": "attribute"}, {"api_name": "hiss.handler.gntp", "line_number": 6, "usage_type": "name"}, {"api_name": "hiss.handler.snp.SNPHandler", "line_number": 7, "usage_type": "attribute"}, {"api_name": "hiss.handler.snp", "line_number": 7, "usage_type": "name"}, {"api_name": "hiss.handler.xbmc.XBMCHandler", "line_number": 8, "usage_type": "attribute"}, {"api_name": "hiss.handler.xbmc", "line_number": 8, "usage_type": "name"}, {"api_name": "asyncio.coroutine", "line_number": 25, "usage_type": "attribute"}, {"api_name": "asyncio.Task", "line_number": 32, "usage_type": "attribute"}]} {"seq_id": "17928574080", "text": "import pprint\nimport os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom request import BaseRequest\nfrom dotenv import 
dotenv_values\n\nconfig = dotenv_values()\n\n# Example of GET /filing-products/offerings\n\nclass GetFilingProductsOfferingsRequest(BaseRequest):\n def __init__(self):\n super().__init__()\n \n def get_filing_products_offerings(self, company_id, jurisdiction):\n params = {\n 'company_id': company_id,\n 'jurisdiction': jurisdiction\n }\n return self.make_request('GET', '/filing-products/offerings', params=params)\n\n# run as standalone script by passing any command line argument\nif len(sys.argv) > 1:\n company_id = config['COMPANY_ID']\n jurisdiction = config['JURISDICTION']\n\n request = GetFilingProductsOfferingsRequest()\n response = request.get_filing_products_offerings(company_id, jurisdiction)\n\n pprint.pprint(response)", "repo_name": "corptools-api/CorpTools-API-Examples", "sub_path": "python/examples/get_filing_products_offerings.py", "file_name": "get_filing_products_offerings.py", "file_ext": "py", "file_size_in_byte": 950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "47", "api": [{"api_name": "sys.path.append", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 5, "usage_type": "call"}, {"api_name": "os.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 5, "usage_type": "call"}, {"api_name": "dotenv.dotenv_values", "line_number": 10, "usage_type": "call"}, {"api_name": "request.BaseRequest", "line_number": 14, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 26, "usage_type": "attribute"}, {"api_name": "request.get_filing_products_offerings", "line_number": 31, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 33, "usage_type": "call"}]} {"seq_id": "28325241161", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom fhirclient import client\nimport fhirclient.models.patient as p\nimport fhirclient.models.bundle as bundle\n\nsettings = {'app_id':'xxxx',\n 'api_base': 'https://r2.smarthealthit.org',\n 'patient_id': 'smart-1137192'}\n# In[2]:\n\n\nsettings = {'app_id': 'my-app',\n 'api_base': 'https://fhir.sitenv.org/open/fhir',\n 'app_secret':'my-app-secret-123',\n 'launch_token': 'bXktYXBwOm15LWFwcC1zZWNyZXQtMTIz' \n }\n\n\n# In[3]:\n\n\nsmart = client.FHIRClient(settings=settings)\n\n\n# In[4]:\n\n\n#smart.ready\n#smart.prepare()\n#smart.ready\n#smart.authorize_url\n\n\n# In[5]:\n\n\npatient = p.Patient.read('?_id=1&_format=json', smart.server)\npatient.birthDate.isostring\n\n\n# In[ ]:\n\n\n\n\n", "repo_name": "markregine/FHIR_Python_fhirclient_testing_open", "sub_path": "script_1.py", "file_name": "script_1.py", "file_ext": "py", "file_size_in_byte": 772, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "fhirclient.client.FHIRClient", "line_number": 27, "usage_type": "call"}, {"api_name": "fhirclient.client", "line_number": 27, "usage_type": "name"}, {"api_name": "fhirclient.models.patient.Patient.read", "line_number": 42, "usage_type": "call"}, {"api_name": "fhirclient.models.patient.Patient", "line_number": 42, "usage_type": "attribute"}, {"api_name": "fhirclient.models.patient", "line_number": 42, "usage_type": "name"}]} {"seq_id": "38322037138", "text": "import jsonpatch\nimport re\nfrom cedar.utils import general_utils as utils\n\n\nclass RemoveValueFromPropertiesPatch(object):\n\n def __init__(self):\n self.description = \"Remove 
the invalid @value property\"\n self.from_version = \"1.0.0\"\n self.to_version = \"1.1.0\"\n\n def is_applied(self, error_message, doc=None):\n if not utils.is_compatible(doc, self.from_version):\n return False\n pattern = re.compile(\n \"object has invalid properties \\(\\['@value'\\]\\) \" \\\n \"at ((/properties/[^/]+/items)*(/properties/[^/]+)*)*/properties$\" \\\n \"|\" \\\n \"object instance has properties which are not allowed by the schema: \\['@value'\\] \" \\\n \"at ((/properties/[^/]+/items)*(/properties/[^/]+)*)*/properties$\")\n return pattern.match(error_message)\n\n def apply_patch(self, doc, error_message):\n patch = self.get_patch(error_message, doc)\n patched_doc = patch.apply(doc)\n return patched_doc\n\n @staticmethod\n def get_patch(error_message, doc=None):\n path = utils.get_error_location(error_message)\n patches = [{\n \"op\": \"remove\",\n \"path\": path + \"/@value\"\n }]\n # Remove @value from required list, if possible\n parent_object, parent_path = utils.get_parent_object(doc, path)\n required_list = parent_object.get(\"required\")\n if required_list:\n patches.append({\n \"op\": \"replace\",\n \"value\": [item for item in required_list if item != \"@value\"],\n \"path\": parent_path + \"/required\"\n })\n return jsonpatch.JsonPatch(patches)\n", "repo_name": "metadatacenter/cedar-util", "sub_path": "scripts/python/cedar/patch/collection/RemoveValueFromPropertiesPatch.py", "file_name": "RemoveValueFromPropertiesPatch.py", "file_ext": "py", "file_size_in_byte": 1671, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "47", "api": [{"api_name": "cedar.utils.general_utils.is_compatible", "line_number": 14, "usage_type": "call"}, {"api_name": "cedar.utils.general_utils", "line_number": 14, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 16, "usage_type": "call"}, {"api_name": "cedar.utils.general_utils.get_error_location", "line_number": 31, "usage_type": "call"}, {"api_name": "cedar.utils.general_utils", "line_number": 31, "usage_type": "name"}, {"api_name": "cedar.utils.general_utils.get_parent_object", "line_number": 37, "usage_type": "call"}, {"api_name": "cedar.utils.general_utils", "line_number": 37, "usage_type": "name"}, {"api_name": "jsonpatch.JsonPatch", "line_number": 45, "usage_type": "call"}]} {"seq_id": "36481762258", "text": "import matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimg=mpimg.imread('image1.jpg')\nprint(img)\nlum_img=img[:, :, 0]\n\nplt.imshow(img)\n\nplt.imshow(lum_img, cmap=\"hot\")\nplt.savefig('output.jpg', bbox_inches='tight')\nplt.show()\n", "repo_name": "subhankarghosh2000/Multimedia-System", "sub_path": "Image edit/image_edit.py", "file_name": "image_edit.py", "file_ext": "py", "file_size_in_byte": 236, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "matplotlib.image.imread", "line_number": 3, "usage_type": "call"}, {"api_name": "matplotlib.image", "line_number": 3, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 7, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 7, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, 
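The patch class above hands the actual document mutation to the jsonpatch library. A minimal standalone sketch of the same calls (toy document and paths, not taken from the repo):

import jsonpatch

doc = {"properties": {"@value": {}, "id": {}}, "required": ["@value", "id"]}
patch = jsonpatch.JsonPatch([
    {"op": "remove", "path": "/properties/@value"},
    {"op": "replace", "path": "/required", "value": ["id"]},
])
print(patch.apply(doc))  # {'properties': {'id': {}}, 'required': ['id']}

By default JsonPatch.apply returns a patched copy and leaves the input document untouched, which is why apply_patch above can simply return its result.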
{"api_name": "matplotlib.pyplot.show", "line_number": 11, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 11, "usage_type": "name"}]} {"seq_id": "30062991922", "text": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nfrom utils import *\nfrom model import EncoderRNN, Attn, AttnDecoder\nfrom train import trainIters\n\n\n#Configure models\nmodel_name = 'cb_model'\nattn_model = 'dot'\n#attn_model = 'general'\n#attn_model = 'concat'\nhidden_size = 500\nencoder_n_layers = 2\ndecoder_n_layers = 2\ndropout = 0.1\nbatch_size = 64\n\nvoc, pairs = loadData('./datasets/conversations.csv')\nn_tokens = voc.size\n\nembedding = nn.Embedding(n_tokens, hidden_size)\nencoder = EncoderRNN(hidden_size, embedding, encoder_n_layers, dropout)\n#attn = Attn(attn_model, hidden_size)\ndecoder = AttnDecoder(attn_model, embedding, hidden_size,\n n_tokens, decoder_n_layers, dropout)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nencoder = encoder.to(device)\ndecoder = decoder.to(device)\n\n# Configure training/optimization\nclip = 50.0\nteacher_forcing_ratio = 1.0\nlearning_rate = 0.0001\ndecoder_learning_ratio = 5.0\nn_iteration = 4000\nprint_every = 1\nsave_every = 500\nsave_dir = \"./checkpoints\"\ncorpus_name = \"movie conversations\"\nloadFilename = None\n\nencoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate)\ndecoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate * decoder_learning_ratio)\n\nencoder.train()\ndecoder.train()\ntrainIters(model_name, voc, pairs, encoder, decoder, encoder_optimizer,\n decoder_optimizer, embedding, encoder_n_layers, decoder_n_layers,\n save_dir, n_iteration, batch_size, print_every, save_every, clip,\n corpus_name, loadFilename)\n", "repo_name": "luopeixiang/simple_chatbot", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1549, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "torch.nn.Embedding", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "model.EncoderRNN", "line_number": 25, "usage_type": "call"}, {"api_name": "model.AttnDecoder", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 46, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 47, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 47, "usage_type": "name"}, {"api_name": "train.trainIters", "line_number": 51, "usage_type": "call"}]} {"seq_id": "32339651839", "text": "import datetime\n\nfrom zdppy_orm import *\nimport logging\n\n# 创建日志对象\nlogger = logging.getLogger(\"zdppy_orm\")\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(logging.StreamHandler())\n\n# 创建数据库连接\ndb = MySQLDatabase('zdppy_orm', host='127.0.0.1', user='root', passwd='root')\n\n\nclass BaseModel(Model):\n class Meta:\n database = db\n\n\nclass User(BaseModel):\n username = TextField()\n\n class Meta:\n table_name = 'user'\n\n\nclass Tweet(BaseModel):\n content = TextField()\n timestamp = DateTimeField(default=datetime.datetime.now)\n user = ForeignKeyField(User, backref=\"tweets\")\n\n 
class Meta:\n        table_name = 'tweet'\n\n\nclass Favorite(BaseModel):\n    user = ForeignKeyField(User, backref=\"favorites\")\n    tweet = ForeignKeyField(Tweet, backref=\"favorites\")\n\n\nif __name__ == \"__main__\":\n    db.connect()\n    db.create_tables([User, Tweet, Favorite])\n\n    # Insert method 1\n    charlie = User(username=\"charlie\")\n    rows = charlie.save()\n    if rows == 0:\n        print(\"No rows were updated\")\n\n    # Insert method 2\n    huey = User.create(username=\"huey\")\n\n    # Drop the tables; tables holding foreign keys must be dropped first\n    db.drop_tables([Favorite, Tweet, User])\n", "repo_name": "zhangdapeng520/zdppy_orm", "sub_path": "examples/02添加数据.py", "file_name": "02添加数据.py", "file_ext": "py", "file_size_in_byte": 1177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 8, "usage_type": "attribute"}, {"api_name": "logging.StreamHandler", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "attribute"}]} {"seq_id": "7254955363", "text": "import time\nfrom selenium import webdriver\nimport traceback as tb\nimport time\nimport requests\nfrom bs4 import BeautifulSoup\n\ndriver = webdriver.Chrome('./chromedriver_win32/chromedriver.exe') # Optional argument, if not specified will search path.\nl=[\"secretonlyiknow\"]\nfor i in range(2,6):\n    l+=[f\"secretonlyiknows\"]\nmegalist=[]\nfor xx in l:\n    driver.get(xx)\n    elems = driver.find_elements_by_css_selector(\".city_tab [href]\")\n    try:\n        for index,i in enumerate( elems):\n            if \"View Email ID\" in i.text:\n                x=i.get_attribute('href')\n                r=requests.get(x)\n                soup = BeautifulSoup(r.text, 'html.parser')\n                mails = soup.findAll(\"td\", {\"class\": \"table_space_td_right1\"})\n                megalist.append( mails[-1].text.split())\n    except:\n        pass\nwith open('your_file.txt', 'w+') as f:\n    for item in megalist:\n        if len(item)>1:\n            x=\" \".join(item)\n            f.write(f'{x}\\n')\n        else:\n            f.write(f'{item}\\n')\ndriver.quit()\n", "repo_name": "nikhilc2710/funprojects", "sub_path": "seleniumtest.py", "file_name": "seleniumtest.py", "file_ext": "py", "file_size_in_byte": 1033, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "47", "api": [{"api_name": "selenium.webdriver.Chrome", "line_number": 8, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 8, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}]} {"seq_id": "25999467247", "text": "from sklearn import datasets, metrics, preprocessing\nfrom stacked_generalization.lib.stacking import FWLSRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge\nimport numpy as np\n\n\nboston = datasets.load_boston()\nX = preprocessing.StandardScaler().fit_transform(boston.data)\nY = boston.target\n\nX_train = X[:200]\nY_train = Y[:200]\nX_test = X[200:]\nY_test = Y[200:]\n\nbreg = LinearRegression()\nregs = [RandomForestRegressor(n_estimators=50, random_state=1),\n        GradientBoostingRegressor(n_estimators=25, random_state=1),\n        Ridge(),\n        ExtraTreesRegressor(n_estimators=50),\n        ]\nfeature_func = lambda x: np.c_[np.ones((x.shape[0], 1)),\n                               x[:, 1].reshape((x.shape[0], 1)),\n                               x[:, 6].reshape((x.shape[0], 1)),]\n\nsr = FWLSRegressor(breg,\n                   
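                   # breg: level-1 blending regressor fitted on the base
                   # models' out-of-fold predictions; regs (next argument)
                   # holds the level-0 base regressors, and feature_func
                   # supplies the meta-features that weight them (a reading
                   # of the FWLS setup, assumed rather than documented here).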
regs,\n feature_func,\n n_folds=3,\n verbose=0,\n oob_score_flag=False)\n\nsr.fit(X_train, Y_train)\nscore = metrics.mean_squared_error(sr.predict(X_test), Y_test)\nprint (\"MSE of stacked regressor: %f\" % score)\n", "repo_name": "fukatani/stacked_generalization", "sub_path": "stacked_generalization/example/fwls_regression.py", "file_name": "fwls_regression.py", "file_ext": "py", "file_size_in_byte": 1272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 116, "dataset": "github-code", "pt": "47", "api": [{"api_name": "sklearn.datasets.load_boston", "line_number": 10, "usage_type": "call"}, {"api_name": "sklearn.datasets", "line_number": 10, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.StandardScaler", "line_number": 11, "usage_type": "call"}, {"api_name": "sklearn.preprocessing", "line_number": 11, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 19, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestRegressor", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.ensemble.GradientBoostingRegressor", "line_number": 21, "usage_type": "call"}, {"api_name": "sklearn.linear_model.Ridge", "line_number": 22, "usage_type": "call"}, {"api_name": "sklearn.ensemble.ExtraTreesRegressor", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 25, "usage_type": "call"}, {"api_name": "stacked_generalization.lib.stacking.FWLSRegressor", "line_number": 29, "usage_type": "call"}, {"api_name": "sklearn.metrics.mean_squared_error", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 37, "usage_type": "name"}]} {"seq_id": "12126405483", "text": "from flask import Flask, render_template, session, redirect, url_for, flash\nfrom flask_wtf import FlaskForm\nfrom wtforms import (StringField, BooleanField, validators,\n RadioField, TextAreaField, SubmitField)\n\napp = Flask(__name__)\n\napp.config[\"SECRET_KEY\"] = \"mykey\"\n\n\nclass Form(FlaskForm):\n name = StringField(\"Enter Name\", [validators.DataRequired()])\n\n submit = SubmitField(\"submit\")\n\n\n@app.route(\"/\", methods=[\"POST\", \"GET\"])\ndef home():\n form = Form()\n if form.validate_on_submit():\n session[\"name\"] = form.name.data\n flash(\"thank you! 
{}\".format(session[\"name\"]))\n\n return redirect(url_for(\"home\"))\n return render_template(\"home.html\", form=form)\n\n\n@app.route(\"/thankyou\", methods=[\"POST\", \"GET\"])\ndef thankyou():\n return render_template(\"thankyou.html\")\n\n\napp.run(debug=True)\n", "repo_name": "sunny812546/flask-session-redirect-url_for-wtforms-", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 844, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask_wtf.FlaskForm", "line_number": 11, "usage_type": "name"}, {"api_name": "wtforms.StringField", "line_number": 12, "usage_type": "call"}, {"api_name": "wtforms.validators.DataRequired", "line_number": 12, "usage_type": "call"}, {"api_name": "wtforms.validators", "line_number": 12, "usage_type": "name"}, {"api_name": "wtforms.SubmitField", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 30, "usage_type": "call"}]} {"seq_id": "28867402589", "text": "import os\nimport logging\nimport random\nimport numpy as np\nimport torch\n\nfrom castle.common.base import BaseLearner, Tensor\nfrom .trainers.al_trainer import Trainer\nfrom .models.masked_model import MaskedModel\nfrom .helpers.utils import callback_after_training\nfrom castle.common.consts import MCSL_VALID_PARAMS\nfrom castle.common.validator import check_args_value\n\n\ndef set_seed(seed):\n \"\"\"\n Referred from:\n - https://stackoverflow.com/questions/38469632/tensorflow-non-repeatable-results\n \"\"\"\n\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n try:\n os.environ['PYTHONHASHSEED'] = str(seed)\n finally:\n pass\n\n\nclass MCSL(BaseLearner):\n \"\"\"\n Masked Gradient-Based Causal Structure Learning\n\n A gradient-based algorithm for non-linear additive noise data by learning\n the binary adjacency matrix.\n\n Parameters\n ----------\n model_type: str, default: 'nn'\n `nn` denotes neural network, `qr` denotes quatratic regression.\n num_hidden_layers: int, default: 4\n Number of hidden layer in neural network when `model_type` is 'nn'.\n hidden_dim: int, default: 16\n Number of hidden dimension in hidden layer, when `model_type` is 'nn'.\n graph_thresh: float, default: 0.5\n Threshold used to determine whether has edge in graph, element greater\n than the `graph_thresh` means has a directed edge, otherwise has not.\n l1_graph_penalty: float, default: 2e-3\n Penalty weight for L1 normalization\n learning_rate: float, default: 3e-2\n learning rate for opitimizer\n max_iter: int, default: 25\n Number of iterations for optimization problem\n iter_step: int, default: 1000\n Number of steps for each iteration\n init_iter: int, default: 2\n Initial iteration to disallow early stopping\n h_tol: float, default: 1e-10\n Tolerance of optimization problem\n init_rho: float, default: 1e-5\n Initial value for penalty parameter.\n rho_thresh: float, default: 1e14\n Threshold for penalty parameter.\n h_thresh: float, default: 
0.25\n        Threshold for the acyclicity constraint value h\n    rho_multiply: float, default: 10.0\n        Multiplier used to amplify rho each time\n    temperature: float, default: 0.2\n        Temperature for the Gumbel sigmoid\n    device_type: str, default: 'cpu'\n        'cpu' or 'gpu'\n    device_ids: int or str, default '0'\n        CUDA devices; effective when ``device_type`` is 'gpu'.\n        For single-device modules, ``device_ids`` can be int or str, e.g. 0 or '0',\n        For multi-device modules, ``device_ids`` must be str, format like '0, 1'.\n    random_seed: int, default: 1230\n        Random seed for every random value\n\n    References\n    ----------\n    https://arxiv.org/abs/1910.08527\n\n    Examples\n    --------\n    >>> from castle.algorithms import MCSL\n    >>> from castle.datasets import load_dataset\n    >>> from castle.common import GraphDAG\n    >>> from castle.metrics import MetricsDAG\n    >>> true_dag, X = load_dataset(name='iid_test')\n    >>> n = MCSL(iter_step=1000, rho_thresh=1e14, init_rho=1e-5,\n    ...          rho_multiply=10, graph_thresh=0.5, l1_graph_penalty=2e-3)\n    >>> n.learn(X)\n    >>> GraphDAG(n.causal_matrix, true_dag)\n    >>> met = MetricsDAG(n.causal_matrix, true_dag)\n    >>> print(met.metrics)\n    \"\"\"\n\n    @check_args_value(MCSL_VALID_PARAMS)\n    def __init__(self, model_type='nn', num_hidden_layers=4, hidden_dim=16,\n                 graph_thresh=0.5, l1_graph_penalty=2e-3, learning_rate=3e-2,\n                 max_iter=25, iter_step=1000, init_iter=2, h_tol=1e-10,\n                 init_rho=1e-5, rho_thresh=1e14, h_thresh=0.25,\n                 rho_multiply=10, temperature=0.2, device_type='cpu',\n                 device_ids='0', random_seed=1230) -> None:\n        super(MCSL, self).__init__()\n\n        self.model_type = model_type\n        self.num_hidden_layers = num_hidden_layers\n        self.hidden_dim = hidden_dim\n        self.graph_thresh = graph_thresh\n        self.l1_graph_penalty = l1_graph_penalty\n        self.learning_rate = learning_rate\n        self.max_iter = max_iter\n        self.iter_step = iter_step\n        self.init_iter = init_iter\n        self.h_tol = h_tol\n        self.init_rho = init_rho\n        self.rho_thresh = rho_thresh\n        self.h_thresh = h_thresh\n        self.rho_multiply = rho_multiply\n        self.temperature = temperature\n        self.device_type = device_type\n        self.device_ids = device_ids\n        self.random_seed = random_seed\n\n        if torch.cuda.is_available():\n            logging.info('GPU is available.')\n        else:\n            logging.info('GPU is unavailable.')\n            if self.device_type == 'gpu':\n                raise ValueError(\"GPU is unavailable, \"\n                                 \"please set device_type = 'cpu'.\")\n        if self.device_type == 'gpu':\n            if self.device_ids:\n                os.environ['CUDA_VISIBLE_DEVICES'] = str(self.device_ids)\n            device = torch.device('cuda')\n        else:\n            device = torch.device('cpu')\n        self.device = device\n\n    def learn(self, data, columns=None, pns_mask=None, **kwargs) -> None:\n        \"\"\"\n        Set up and run the MCSL algorithm.\n\n        Parameters\n        ----------\n        data: castle.Tensor or numpy.ndarray\n            The castle.Tensor or numpy.ndarray format data you want to learn.\n        columns: Index or array-like\n            Column labels to use for resulting tensor. 
Will default to\n RangeIndex (0, 1, 2, ..., n) if no column labels are provided.\n pns_mask: array_like or None\n The mask matrix.\n array with element in {0, 1}, ``0`` denotes has no edge in i -> j,\n ``1`` denotes maybe has edge in i -> j or not.\n \"\"\"\n\n x = Tensor(data, columns=columns)\n\n self.n_samples, self.n_nodes = x.shape\n if pns_mask is None:\n pns_mask = torch.ones([x.shape[1], x.shape[1]], device=self.device)\n else:\n pns_mask = torch.tensor(pns_mask, device=self.device)\n\n causal_matrix, causal_matrix_weight = self._mcsl(x, pns_mask)\n\n self.causal_matrix_weight = Tensor(causal_matrix_weight,\n index=x.columns,\n columns=x.columns\n )\n self.causal_matrix = Tensor(causal_matrix,\n index=x.columns,\n columns=x.columns\n )\n\n def _mcsl(self, x, pns_mask) -> tuple:\n \"\"\"\n Starting model of MCSL.\n\n Parameters\n ----------\n x: torch.Tensor\n The torch.Tensor data you want to learn.\n pns_mask: torch.Tensor\n The mask matrix.\n \"\"\"\n\n set_seed(self.random_seed)\n\n model = MaskedModel(model_type=self.model_type,\n n_samples=self.n_samples,\n n_nodes=self.n_nodes,\n pns_mask=pns_mask,\n num_hidden_layers=self.num_hidden_layers,\n hidden_dim=self.hidden_dim,\n l1_graph_penalty=self.l1_graph_penalty,\n seed=self.random_seed,\n device=self.device)\n trainer = Trainer(model=model,\n learning_rate=self.learning_rate,\n init_rho=self.init_rho,\n rho_thresh=self.rho_thresh,\n h_thresh=self.h_thresh,\n rho_multiply=self.rho_multiply,\n init_iter=self.init_iter,\n h_tol=self.h_tol,\n temperature=self.temperature,\n device=self.device)\n\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x, device=self.device)\n w_logits = trainer.train(x, self.max_iter, self.iter_step)\n\n w_est, w_est_weight = callback_after_training(w_logits,\n self.temperature,\n self.graph_thresh)\n return w_est.detach().cpu().numpy(), w_est_weight.detach().cpu().numpy()\n", "repo_name": "huawei-noah/trustworthyAI", "sub_path": "gcastle/castle/algorithms/gradient/mcsl/torch/mcsl.py", "file_name": "mcsl.py", "file_ext": "py", "file_size_in_byte": 8330, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 830, "dataset": "github-code", "pt": "47", "api": [{"api_name": "random.seed", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 22, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 25, "usage_type": "attribute"}, {"api_name": "castle.common.base.BaseLearner", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 126, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 127, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 129, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 135, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 138, "usage_type": "call"}, {"api_name": "castle.common.validator.check_args_value", "line_number": 98, "usage_type": "call"}, {"api_name": "castle.common.consts.MCSL_VALID_PARAMS", "line_number": 98, "usage_type": "argument"}, {"api_name": "castle.common.base.Tensor", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 162, "usage_type": "call"}, {"api_name": 
"torch.tensor", "line_number": 164, "usage_type": "call"}, {"api_name": "castle.common.base.Tensor", "line_number": 168, "usage_type": "call"}, {"api_name": "castle.common.base.Tensor", "line_number": 172, "usage_type": "call"}, {"api_name": "models.masked_model.MaskedModel", "line_number": 191, "usage_type": "call"}, {"api_name": "trainers.al_trainer.Trainer", "line_number": 200, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 211, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 212, "usage_type": "call"}, {"api_name": "helpers.utils.callback_after_training", "line_number": 215, "usage_type": "call"}]} {"seq_id": "32767170848", "text": "import cv2\nimport numpy as np\n\n\n\n# 탑뷰 변환 행렬을 구하는 함수\n# 변환행렬, 변환된이미지가로, 변환된이미지세로 반환\ndef get_trans_matrix(tl, bl, tr, br):\n\n # Original Image\n pts1 = np.float32([tl, bl, tr, br])\n\n # 변환된 이미지의 가로세로 길이 계산\n width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n\n trans_image_weight = max(int(width_a), int(width_b))\n trans_image_height = max(int(height_a), int(height_b))\n\n # 변환된 새로운 이미지의 가로세로 행렬 만들기\n pts2 = np.array([\n [0, 0],\n [0, trans_image_height - 1],\n [trans_image_weight - 1, 0],\n [trans_image_weight - 1, trans_image_height - 1]], dtype=\"float32\")\n\n # 변환 행렬\n trans_matrix = cv2.getPerspectiveTransform(pts1, pts2)\n\n return trans_matrix, trans_image_weight, trans_image_height\n\n\n\nimg = cv2.imread('/Users/itaegyeong/Desktop/tt.png')\nM, w, h = get_trans_matrix([198,251], [16,378], [397, 246], [586,383])\ndst = cv2.warpPerspective(img, M, (w, h))\ncv2.imshow('original',img)\ncv2.imshow('transfer',dst)\ncv2.imwrite('transfer.jpg',dst)\ncv2.imwrite('original.jpg',img)\ncv2.waitKey(0)", "repo_name": "SWMaestro8th/WatchCoach_ML", "sub_path": "tt.py", "file_name": "tt.py", "file_ext": "py", "file_size_in_byte": 1414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "47", "api": [{"api_name": "numpy.float32", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 30, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 40, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 43, "usage_type": "call"}]} {"seq_id": "21340774459", "text": "import pytest\n\nfrom cajitos_site import mail\nfrom tests.utils import captured_templates\n\n\ndef test_home(app):\n with captured_templates(app) as templates:\n rv = app.test_client().get('/')\n assert rv.status_code == 200\n assert len(templates) == 1\n template, context = templates[0]\n assert template.name == 'blog.html'\n print(context)\n 
assert len(context['blog']) == 5 # pagination\n # assert {p.author.id for p in context['blog']} == {2} # Author of the latest 5 blog\n\n\ndef test_base(app):\n rv = app.test_client().get('/blog')\n assert rv.status_code == 200\n\n print(dir(rv))\n print(rv.stream)\n\n\n@pytest.mark.skip\ndef test_service_email(user, app):\n with mail.record_messages() as outbox:\n mail.send_message(subject='testing',\n body='test',\n recipients=[user.email])\n\n assert len(outbox) == 1\n assert outbox[0].subject == \"testing\"\n", "repo_name": "OlgaKuratkina/cajitos", "sub_path": "tests/test_base.py", "file_name": "test_base.py", "file_ext": "py", "file_size_in_byte": 974, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "tests.utils.captured_templates", "line_number": 8, "usage_type": "call"}, {"api_name": "cajitos_site.mail.record_messages", "line_number": 29, "usage_type": "call"}, {"api_name": "cajitos_site.mail", "line_number": 29, "usage_type": "name"}, {"api_name": "cajitos_site.mail.send_message", "line_number": 30, "usage_type": "call"}, {"api_name": "cajitos_site.mail", "line_number": 30, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 27, "usage_type": "attribute"}]} {"seq_id": "13160640627", "text": "import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom typing import List\n\nclass MyDataset(Dataset):\n def __init__(self, data):\n self.data = data\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n sample = self.data[idx]\n return sample\n\n\n# def normalize(adj):\n# \"\"\"Normalization by D^{-1/2} (A+I) D^{-1/2}.\"\"\"\n# rowsum = adj.sum(dim=1) + 1e-20\n# d_inv_sqrt = torch.pow(rowsum, -0.5).flatten()\n# d_inv_sqrt[d_inv_sqrt == float('inf')] = 0.\n# d_mat_inv_sqrt = torch.diag(d_inv_sqrt).to(adj.dtype)\n# adj = adj.mm(d_mat_inv_sqrt).t().mm(d_mat_inv_sqrt)\n# return adj\n\n\n# def row_normalize(adj):\n# \"\"\"Row-normalize sparse matrix\"\"\"\n# rowsum = np.array(adj.sum(1)).flatten()\n# d_inv = 1.0 / (np.maximum(1.0, rowsum))\n# d_mat_inv = sp.diags(d_inv, 0)\n# adj = d_mat_inv.dot(adj)\n# return adj\n\n# def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n# \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n# sparse_mx = sparse_mx.tocoo().astype(np.float32)\n# if len(sparse_mx.row) == 0 and len(sparse_mx.col) == 0:\n# indices = torch.LongTensor([[], []])\n# else:\n# indices = torch.from_numpy(\n# np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n# values = torch.from_numpy(sparse_mx.data)\n# shape = torch.Size(sparse_mx.shape)\n# return indices, values, shape\n\ndef row_normalize(tensor):\n if tensor.layout is torch.sparse_coo:\n tensor = tensor.coalesce()\n row_sum = torch.sparse.sum(tensor, dim=1).to_dense() + 1e-20\n normalized_tensor = torch.sparse.FloatTensor(tensor.indices(), tensor.values() / row_sum[tensor.indices()[0]], tensor.size())\n else:\n row_sum = tensor.sum(dim=1, keepdim=True) + 1e-20\n normalized_tensor = tensor / row_sum\n return normalized_tensor\n\ndef normalize(adj):\n if adj.layout is torch.sparse_coo:\n adj = adj.coalesce()\n rowsum = torch.sparse.sum(adj, dim=1).to_dense() + 1e-20\n d_inv_sqrt = torch.pow(rowsum, -0.5)\n d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.\n adj_normalized = torch.sparse.FloatTensor(adj.indices(), adj.values() * d_inv_sqrt[adj.indices()[0]] * d_inv_sqrt[adj.indices()[1]],\n adj.size())\n else:\n rowsum = adj.sum(dim=1) + 1e-20\n d_inv_sqrt = torch.pow(rowsum, -0.5)\n 
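        # Symmetric normalization D^{-1/2} (A) D^{-1/2}: rowsum is the degree
        # vector d and d_inv_sqrt is d^{-1/2}; the next line zeroes any inf
        # entries (defensive, since the 1e-20 term already guards zero-degree
        # rows) before the diagonal scaling matrix is built.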
d_inv_sqrt[torch.isinf(d_inv_sqrt)] = 0.\n        d_mat_inv_sqrt = torch.diag(d_inv_sqrt)\n        adj = adj.to(d_mat_inv_sqrt.dtype)\n        adj_normalized = adj.matmul(d_mat_inv_sqrt).transpose(0, 1).matmul(d_mat_inv_sqrt)\n    return adj_normalized\n\n# Node-wise sampling\ndef node_wise_sampling(A:torch.Tensor, previous_nodes:torch.Tensor, sample_num:int):\n    \"\"\"\n    A: torch.Tensor, adjacency matrix of all candidate neighbor nodes (a node's neighbors include the node itself),\n    square with ones on the diagonal, i.e. every node is connected to itself\n    previous_nodes: indices of the previous layer's nodes in matrix A (not global ids), sorted ascending by their index in A\n    sample_num: number of nodes to sample per node\n\n    the returned adj is used in the forward pass\n    sampled_nodes is used for sampling the next layer; these are indices in A, not global ids\n    previous_index is the position of previous_nodes inside after_nodes; it is needed later in training, collected layer by layer into a list and passed to the previous_indices parameter of Graphsage_first\n    \"\"\"\n    U = A[previous_nodes,:]\n    sampled_nodes = []\n    for U_row in U:\n        indices = U_row.nonzero().flatten()\n        sampled_indices = indices[torch.randperm(indices.shape[0])[:sample_num]]\n        sampled_nodes.append(sampled_indices)\n    sampled_nodes = torch.unique(torch.cat(sampled_nodes))\n    sampled_nodes = torch.unique(torch.cat([torch.tensor(previous_nodes), sampled_nodes]), sorted=True)\n    adj = U[:, sampled_nodes]\n    adj = row_normalize(adj)\n\n    previous_index = torch.where(torch.isin(sampled_nodes, torch.tensor(previous_nodes)))[0]\n\n\n    return adj, sampled_nodes, previous_index\n\n# Layer-wise sampling\ndef layer_wise_sampling(A:torch.Tensor,previous_nodes:torch.Tensor,sample_num:int):\n    '''\n    A: torch.Tensor, adjacency matrix of all candidate neighbor nodes (a node's neighbors include the node itself),\n    square with ones on the diagonal, i.e. every node is connected to itself\n    previous_nodes: indices of the previous layer's nodes in matrix A (not global ids), sorted ascending by their index in A\n    sample_num: maximum number of nodes sampled per layer\n\n    adj: used in the forward pass\n    adj.dtype torch.float32\n    sampled_nodes(torch.Tensor): used for sampling the next layer; indices in A, not global ids\n    '''\n    s_num = min(A.shape[0], sample_num)\n    sampled_nodes = torch.randperm(A.shape[0])[:s_num].sort().values\n    if A.layout is torch.sparse_coo:\n        adj = A.index_select(0, previous_nodes).index_select(1, sampled_nodes)\n    else:\n        adj = A[previous_nodes, :][:, sampled_nodes]\n    adj = row_normalize(adj)\n\n    # previous_index = torch.where(torch.isin(sampled_nodes, torch.tensor(previous_nodes)))[0]\n\n    return adj, sampled_nodes\n\n# Layer-wise importance sampling\ndef layer_importance_sampling(A:torch.Tensor, previous_nodes:torch.Tensor, sample_num:int):\n    '''\n    A: torch.Tensor, adjacency matrix of all candidate neighbor nodes (a node's neighbors include the node itself),\n    square with ones on the diagonal, i.e. every node is connected to itself\n    previous_nodes: indices of the previous layer's nodes in matrix A (not global ids), sorted ascending by their index in A\n    sample_num: maximum number of nodes sampled per layer\n\n    adj: used in the forward pass\n    adj.dtype torch.float32\n    sampled_nodes(torch.Tensor): used for sampling the next layer; indices in A, not global ids\n    '''\n    lap = normalize(A)\n    lap_sq = torch.mul(lap, lap)\n    if A.layout is torch.sparse_coo:\n        lap_sq = lap_sq.index_select(0, previous_nodes)\n        pi = torch.sparse.sum(lap_sq, dim=0).to_dense()\n        p = pi / torch.sum(pi)\n        s_num = min(A.shape[0], sample_num)\n        sampled_nodes = torch.multinomial(p, s_num, replacement=False)\n        sampled_nodes = torch.sort(sampled_nodes)[0]\n        adj = lap.index_select(0, previous_nodes).index_select(1, sampled_nodes)\n        adj = adj.coalesce()\n        adj = torch.sparse.FloatTensor(adj.indices(), adj.values() / p[sampled_nodes[adj.indices()[1]]], adj.size())\n    else:\n        pi = torch.sum(lap_sq[previous_nodes, :], dim=0)\n        p = pi / torch.sum(pi)\n        s_num = min(A.shape[0], sample_num)\n        sampled_nodes = torch.multinomial(p, s_num, replacement=False)\n        sampled_nodes = torch.sort(sampled_nodes)[0]\n        adj = lap[previous_nodes, :][:, sampled_nodes]\n        adj = torch.mul(adj, 1/p[sampled_nodes])\n    adj = row_normalize(adj)\n\n    return adj, sampled_nodes\n\nif __name__ == \"__main__\":\n    data = None\n    dataset = MyDataset(data) # data is the set of training-set node ids on worker i\n\n    batch_size = 128\n    
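    # Toy usage sketch with illustrative values (not part of the original
    # script): sample a two-layer computation graph with the layer-wise
    # sampler defined above, starting from a small dense adjacency matrix.
    A_toy = ((torch.eye(6) + torch.bernoulli(torch.full((6, 6), 0.3))) > 0).float()
    prev = torch.tensor([0, 2])
    adj1, nodes1 = layer_wise_sampling(A_toy, prev, sample_num=4)
    adj2, nodes2 = layer_wise_sampling(A_toy, nodes1, sample_num=4)
    print(adj1.shape, nodes1)  # (2, 4) row-normalized block and sampled indices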
#生成批处理数据batch\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)", "repo_name": "whr819987540/fs_gnn", "sub_path": "helper/sampler.py", "file_name": "sampler.py", "file_ext": "py", "file_size_in_byte": 7129, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 5, "usage_type": "name"}, {"api_name": "torch.sparse_coo", "line_number": 48, "usage_type": "attribute"}, {"api_name": "torch.sparse.sum", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 50, "usage_type": "attribute"}, {"api_name": "torch.sparse.FloatTensor", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.sparse_coo", "line_number": 58, "usage_type": "attribute"}, {"api_name": "torch.sparse.sum", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 60, "usage_type": "attribute"}, {"api_name": "torch.pow", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.isinf", "line_number": 62, "usage_type": "call"}, {"api_name": "torch.sparse.FloatTensor", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 63, "usage_type": "attribute"}, {"api_name": "torch.pow", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.isinf", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.diag", "line_number": 69, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.randperm", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 92, "usage_type": "call"}, {"api_name": "torch.unique", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.where", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.isin", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 97, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 103, "usage_type": "attribute"}, {"api_name": "torch.randperm", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.sparse_coo", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.Tensor", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.mul", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.sparse_coo", "line_number": 140, "usage_type": "attribute"}, {"api_name": "torch.sparse.sum", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 142, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.sparse.FloatTensor", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.sparse", "line_number": 149, "usage_type": "attribute"}, {"api_name": "torch.sum", "line_number": 151, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 152, "usage_type": "call"}, {"api_name": "torch.multinomial", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.sort", "line_number": 155, "usage_type": "call"}, {"api_name": 
"torch.mul", "line_number": 157, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 168, "usage_type": "call"}]} {"seq_id": "23365976075", "text": "# _*_ coding:utf-8 _*_\n__author__ = 'geda'\n__date__ = '2020/2/4 20:22'\nimport re\n\nfrom django import forms\nfrom operation.models import UserAsk\n\n\nclass UserAskForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = UserAsk\n\t\tfields = ['name', 'mobile', 'course_name']\n\n\tdef clean_mobile(self):\n\t\tmobile = self.cleaned_data['mobile']\n\t\tp = re.compile('^0\\d{2,3}\\d{7,8}$|^1[358]\\d{9}$|^147\\d{8}')\n\t\tif p.match(mobile):\n\t\t\treturn mobile\n\t\traise forms.ValidationError('手机号码格式不正确', code='mobile_inval')", "repo_name": "Guogeda/muxue", "sub_path": "apps/organization/forms.py", "file_name": "forms.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "47", "api": [{"api_name": "django.forms.ModelForm", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 10, "usage_type": "name"}, {"api_name": "operation.models.UserAsk", "line_number": 12, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 17, "usage_type": "call"}, {"api_name": "django.forms.ValidationError", "line_number": 20, "usage_type": "call"}, {"api_name": "django.forms", "line_number": 20, "usage_type": "name"}]} {"seq_id": "33711083780", "text": "import time\nfrom datetime import timedelta\n\nimport tweepy\nfrom selenium.common import TimeoutException\n\nfrom twitter_data.core.base import BaseLinkedinBot\nfrom twitter_data.core.twitter_reader import TwitterReaderBot\nfrom twitter_data.models import TwitterUser\nfrom django.contrib.auth import get_user_model\nfrom django.core.management import BaseCommand, CommandError\nfrom django.conf import settings\n\n\n\n\n\nclass Command(BaseCommand):\n help = 'Start the stream of the bot telling everyone to quit'\n\n def handle(self, *args, **options):\n start = time.time()\n bot = TwitterReaderBot()\n self.stdout.write(\"getting users\")\n bot.log(\"Starting\")\n users = TwitterUser.objects.order_by('user_profile').all()\n\n bot.go_to_twitter()\n for index,user in enumerate(users):\n bot.log(f\"Going to user no{index+1} {user.user_profile}\")\n try:\n bot.go_to_user(user.user_profile)\n except TimeoutException:\n bot.log('Took too long to get user page')\n continue\n bot.random_wait()\n try:\n tweet_page = bot.go_to_last_tweet()\n except TimeoutException:\n bot.log('Took to long to get last tweet')\n continue\n if tweet_page:\n tweet = bot.get_or_save_tweet(user=user)\n if not tweet or tweet.replied:\n if not tweet:\n bot.log(\"No tweet found\")\n elif tweet.replied:\n bot.log(\"Latest tweet already replied to\")\n continue\n else:\n bot.log(f\"Found a new tweet, replying\")\n res = bot.reply_quit(user)\n if res:\n tweet.replied = True\n tweet.save()\n bot.log(\"Done replying\")\n bot.random_wait()\n bot.log('All users done')\n self.stdout.write(\"task finished\")\n end = time.time()\n bot.log(f\"{timedelta(seconds=(end-start))}\")\n\n\n\n\n", "repo_name": "elam91/quitbot", "sub_path": "twitter_data/management/commands/tell_them_to_quit.py", "file_name": "tell_them_to_quit.py", "file_ext": "py", "file_size_in_byte": 2123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "django.core.management.BaseCommand", "line_number": 18, "usage_type": "name"}, {"api_name": 
"time.time", "line_number": 22, "usage_type": "call"}, {"api_name": "twitter_data.core.twitter_reader.TwitterReaderBot", "line_number": 23, "usage_type": "call"}, {"api_name": "twitter_data.models.TwitterUser.objects.order_by", "line_number": 26, "usage_type": "call"}, {"api_name": "twitter_data.models.TwitterUser.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "twitter_data.models.TwitterUser", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.common.TimeoutException", "line_number": 33, "usage_type": "name"}, {"api_name": "selenium.common.TimeoutException", "line_number": 39, "usage_type": "name"}, {"api_name": "time.time", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 61, "usage_type": "call"}]} {"seq_id": "35265002624", "text": "\nimport logging\n\nimport pandas as pd\nfrom path import Path\n\nlg = logging.getLogger(__name__)\n\ndef save(dbdir, **tables):\n dbdir = Path(dbdir).expanduser()\n dbdir.makedirs_p()\n lg.info(\"saving to db dir: %s\" % dbdir)\n assert dbdir.isdir()\n for name, table in tables.items():\n lg.info(\"saving %s (%d records)\", name, len(table))\n table.to_pickle(dbdir / '%s.pickle' % (name))\n table.describe().to_csv(\n dbdir / '%s.describe.tsv' % (name), sep=\"\\t\")\n\ndef load(db, name):\n dbdir = Path(db).expanduser()\n return pd.read_pickle(dbdir / ('%s.pickle' % name))\n \n \n", "repo_name": "mfiers/nfj", "sub_path": "nfj/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 615, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 10, "usage_type": "call"}, {"api_name": "path.Path", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 22, "usage_type": "call"}]} {"seq_id": "24583004705", "text": "\nfrom .data import Data\n\nfrom presentation.view import FileServer, SyncServer\n\nfrom .urls import *\nfrom .utils import *\nfrom .views import *\n\nimport logging\n\n\nlogging.basicConfig(filename=\"application/logs/p2p.log\",\n filemode='a',\n format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n datefmt='%H:%M:%S',\n level=logging.DEBUG)\n\n\n\nclass Manager:\n\n\t@staticmethod\n\tdef start():\n\n\n\t\tfile_server = FileServer(host='0.0.0.0')\n\t\tsync_server = SyncServer(host='0.0.0.0')\n\n\t\tport1 = file_server.start()\n\t\tport2 = sync_server.start()\n\t\twith open(Data.node_config, 'w') as outfile:\n\t\t\tjson.dump({\"nodes\":{\"0.0.0.0\":{\"ip\":\"0.0.0.0\",\"port\":port1,\"active\":False}}},outfile)\n\n\t\tprint(\"listening on 0.0.0.0:%s TCP\"%port1)\n\t\tprint(\"listening on 0.0.0.0:%s UDP\"%port2)\n\t\tlogging.info(\"listening on 0.0.0.0:%s TCP\"%port1)\n\t\tlogging.info(\"listening on 0.0.0.0:%s UDP\"%port2)\n\t\twhile True:\n\t\t\tprint(\">\",end=\"\")\n\t\t\tcommand = input()\n\t\t\tif \"-l\" in command:\n\t\t\t\tData.cluster_list = command[command.index(\"-l\")+1]\n\t\t\tif \"-d\" in command:\n\t\t\t\tData.directory = command[command.index(\"-d\")+1]\n\t\t\tif command == \"list\":\n\t\t\t\tprint(get_cluster())\n\n\t\t\telif len(command.split()) > 1 and command.split()[0] == \"get\":\n\t\t\t\trequest_to_all(command.split()[1])\n\n", "repo_name": "bateternal/p2p", "sub_path": "application/node/manager.py", "file_name": "manager.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", 
"doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "47", "api": [{"api_name": "logging.basicConfig", "line_number": 13, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 17, "usage_type": "attribute"}, {"api_name": "presentation.view.FileServer", "line_number": 27, "usage_type": "call"}, {"api_name": "presentation.view.SyncServer", "line_number": 28, "usage_type": "call"}, {"api_name": "data.Data.node_config", "line_number": 32, "usage_type": "attribute"}, {"api_name": "data.Data", "line_number": 32, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 37, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 38, "usage_type": "call"}, {"api_name": "data.Data.cluster_list", "line_number": 43, "usage_type": "attribute"}, {"api_name": "data.Data", "line_number": 43, "usage_type": "name"}, {"api_name": "data.Data.directory", "line_number": 45, "usage_type": "attribute"}, {"api_name": "data.Data", "line_number": 45, "usage_type": "name"}]} {"seq_id": "42333547977", "text": "import logging, requests, tqdm, random, boto3, instances, json\nfrom typing import List\n\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO, format='%(levelname)s - %(message)s')\n\n# Get the AWS instance base url\nINSTANCE = instances.retreive_instance()\nINSTANCE.load()\nDNS_NAME = INSTANCE.public_dns_name\nBASE_URL = 'http://' + DNS_NAME\n\n\ndef benchmark(tech_1: str, tech_2: str, dataset: List[str], repeat: int):\n '''\n Runs MapReduce tasks for provided technologies.\n\n Parameters:\n tech_1 (str): 1st technology to compare ('hadoop', 'spark' or 'linux')\n tech_2 (str): 2nd technology to compare ('hadoop', 'spark' or 'linux')\n tech_2 (List[str]): List of input filenames\n repeat (int): Number of times to repeat each comparison\n\n Returns:\n results (dict): The comparison results\n '''\n # Fetch result n number of times, for each technologies, over all input files\n results = {}\n with tqdm.tqdm(total=len(dataset)*2*repeat) as pbar:\n for data in dataset:\n results[data] = {tech_1: [], tech_2: []}\n for i in range(repeat):\n for t in (tech_1, tech_2):\n resp = requests.get(BASE_URL + '/' + t + '/wordcount/' + data)\n results[data][t].append(resp.json()['time'])\n pbar.update(1)\n\n # Parse the results\n parsed_results = {\n 'per_file': {},\n 'total': {}\n }\n totals = {tech_1: 0.0, tech_2: 0.0}\n for file in results:\n totals_file = {tech_1: 0.0, tech_2: 0.0}\n for tech in results[file]:\n for test in results[file][tech]:\n totals[tech] += test['elapsed']\n totals_file[tech] += test['elapsed']\n parsed_results['per_file'][file] = {\n tech_1: { 'average_time': totals_file[tech_1] / repeat },\n tech_2: { 'average_time': totals_file[tech_2] / repeat }\n }\n parsed_results['total'] = {\n tech_1: { 'average_time': totals[tech_1] / ( repeat * len(dataset) ) },\n tech_2: { 'average_time': totals[tech_2] / ( repeat * len(dataset) ) }\n }\n return parsed_results\n\n\n\ndef main():\n '''\n The main function of the bechmarking script.\n\n This function will execute two comparisons and output rhe results in files.\n The first comparison is Hadoop vs Linux, which runs 10 times with \n 'pg4300.txt' file as input, which means that a total of 20 MapReduce tasks\n are done for thsi comparison.\n\n The second comparison is Hadoop vs Spark, which runs 3 times for every file\n provided in the dataset (total of 9 files), which means that a total of \n 54 MapReduce task are done for this comparison.\n\n Files generated by this functions are:\n - hadoop_vs_linux.json\n 
- hadoop_vs_spark.json\n '''\n\n # Run Hadoop vs Linux benchmark\n logging.info('HADOOP vs LINUX')\n hadoop_vs_linux = benchmark(\n 'hadoop',\n 'linux', \n [\n 'pg4300.txt'\n ],\n 10\n )\n with open('hadoop_vs_linux.json', 'w') as file:\n file.write(json.dumps(hadoop_vs_linux, indent=4))\n logging.info('Results written to hadoop_vs_linux.json.')\n\n # Run Hadoop vs Spark benchmark\n logging.info('HADOOP vs SPARK')\n hadoop_vs_spark = benchmark(\n 'hadoop',\n 'spark', \n [\n 'buchanj-midwinter-00-t.txt',\n 'carman-farhorizons-00-t.txt',\n 'charlesworth-scene-00-t.txt',\n 'cheyneyp-darkbahama-00-t.txt',\n 'colby-champlain-00-t.txt',\n 'delamare-bumps-00-t.txt',\n 'delamare-lucy-00-t.txt',\n 'delamare-myfanwy-00-t.txt',\n 'delamare-penny-00-t.txt'\n ],\n 3\n )\n with open('hadoop_vs_spark.json', 'w') as file:\n file.write(json.dumps(hadoop_vs_spark, indent=4))\n logging.info('Results written to hadoop_vs_spark.json.')\n\n\n\nif __name__ == \"__main__\":\n main()", "repo_name": "JordMim/LOG8415E", "sub_path": "tp2/benchmark/benchmark.py", "file_name": "benchmark.py", "file_ext": "py", "file_size_in_byte": 3991, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "logging.basicConfig", "line_number": 6, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 6, "usage_type": "attribute"}, {"api_name": "instances.retreive_instance", "line_number": 9, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 15, "usage_type": "name"}, {"api_name": "tqdm.tqdm", "line_number": 30, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 82, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 92, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 93, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 96, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 114, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 115, "usage_type": "call"}]} {"seq_id": "14170541946", "text": "import random\n\nfrom colour import Color\n\nfrom manim import *\n\n\nclass SweepingLine(Scene):\n def construct(self):\n growing_circle = Circle(radius=0.001)\n\n moving_line = Line(start=[-7, 6, 0], end=[-6, -6, 0])\n moving_line.normal_vector = (\n moving_line.copy().rotate(90 * DEGREES).get_vector()\n )\n\n def opacity_updater(obj: Mobject):\n if sum( # check whether dot is inside circle\n (growing_circle.points[0] - growing_circle.get_center()) ** 2\n ) >= sum((obj.get_center() - growing_circle.get_center()) ** 2):\n obj.set_fill(BLUE, opacity=1)\n obj.clear_updaters()\n obj.add_updater(color_updater)\n # self.add_sound()\n\n def color_updater(obj: Mobject):\n if np.dot( # check whether point is *right* of the line\n obj.get_center(), moving_line.normal_vector\n ) < np.dot(moving_line.get_start(), moving_line.normal_vector):\n if obj.color != Color(BLUE):\n obj.set_color(BLUE)\n # self.add_sound()\n else:\n if obj.color != Color(YELLOW):\n obj.set_color(YELLOW)\n\n self.add(growing_circle)\n\n for _ in range(30):\n p = Dot(fill_opacity=0.6)\n p.move_to([random.uniform(-6, 6), random.uniform(-4, 4), 0])\n p.add_updater(opacity_updater)\n self.add(p)\n\n self.play(\n growing_circle.animate.scale_to_fit_width(\n 1.5 * config.frame_width\n ),\n run_time=5,\n )\n\n self.play(Create(moving_line))\n self.play(moving_line.animate.shift(14 * RIGHT), run_time=5)\n 
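        # Sweep the line all the way right and then back left; color_updater
        # re-colors each dot whenever it ends up on the other side of the
        # moving line.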
self.play(moving_line.animate.shift(14 * LEFT), run_time=5)\n\n", "repo_name": "tehutahu/manim_practice", "sub_path": "example_scenes/practice/tutorialE06.py", "file_name": "tutorialE06.py", "file_ext": "py", "file_size_in_byte": 1821, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "colour.Color", "line_number": 30, "usage_type": "call"}, {"api_name": "colour.Color", "line_number": 34, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 41, "usage_type": "call"}]} {"seq_id": "24465236358", "text": "import torch\nimport sys\n# absolute path to utilities\nsys.path.insert(0, 'C:\\\\Users\\\\ahmed.saidani\\\\Desktop\\\\FMLB\\\\server\\\\fml\\\\utils')\nimport torch.nn as nn\nfrom models import CifarCnn, MnistCnn\nfrom data_loaders import CifarDataLoader, MnistDataLoader\nfrom config import Config\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torch.optim as optim\nimport syft as sy # <-- NEW: import the Pysyft library\nimport os, psutil\nimport time\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nimport GPUtil\n\n# declare the config, model, dataset, optimizer, and criterion\nconfig = Config(sys.argv)\n\n# supports both mnist and cifar datasets\nif config.dataset ==\"CIFAR\":\n model = CifarCnn()\nelse:\n model = MnistCnn()\n\nif config.dataset ==\"CIFAR\":\n dataloader = CifarDataLoader(sys.argv)\nelse:\n dataloader = MnistDataLoader(sys.argv)\n\noptimizer = optim.SGD(model.parameters(), lr=config.lr)\ncriterion = nn.CrossEntropyLoss()\n\n# declare clients\nhook = sy.TorchHook(torch) \nclient_one = sy.VirtualWorker(hook, id=\"client_one\") \nclient_two = sy.VirtualWorker(hook, id=\"client_two\") \n\n# workers and gpu config\nuse_cuda = config.gpu and torch.cuda.is_available()\ntorch.manual_seed(1)\ndevice = torch.device(\"cuda\" if use_cuda else \"cpu\")\nkwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n\n# load federated dataset\nfederated_train_loader = sy.FederatedDataLoader(dataloader.train_dataset.federate((client_one, client_two)), batch_size=config.batch_size, shuffle=True, **kwargs)\n\n# define training\ndef train():\n for i in range(config.epochs):\n model.train()\n for batch_idx, (data, target) in enumerate(federated_train_loader): \n model.send(data.location) \n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n model.get() \n\n# define testing\ndef test():\n model.eval()\n total_loss = 0\n correct = 0\n total = 0\n y_true = []\n y_pred = []\n with torch.no_grad():\n for data, target in dataloader.test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n y_true.extend(target.numpy())\n _, predicted = torch.max(output.data, 1)\n y_pred.extend(predicted.cpu().numpy())\n total_loss += criterion(output, target).item() \n pred = output.argmax(1, keepdim=True) \n correct += pred.eq(target.view_as(pred)).sum().item()\n total += target.size(0)\n # get accuracy metrics \n accuracy = str(100 * correct/total)\n total_loss = str(total_loss/total)\n precision=str(100 * sum(precision_score(y_true, y_pred, average=None))/len(dataloader.train_dataset.classes))\n recall=str(100 * sum(recall_score(y_true, y_pred, average=None))/len(dataloader.train_dataset.classes))\n fone=str(100 * sum(f1_score(y_true, y_pred, 
average=None))/len(dataloader.train_dataset.classes))\n    return accuracy, total_loss, precision, recall, fone\n\n# define benchmarking function \ndef benchmark():\n    # declare start time, cpu and network use at the beginning \n    start_time = time.time()\n    old_network = psutil.net_io_counters().bytes_recv + psutil.net_io_counters().bytes_sent\n    old_cpu = psutil.cpu_percent(interval=None)\n    # scrape metrics\n    train()\n    accuracy, total_loss, precision, recall, fone = test()\n    execution_time = time.time() - start_time\n    cpu = str(psutil.cpu_percent(interval=None))\n    execution_time = str(execution_time)\n    new_network = psutil.net_io_counters().bytes_recv + psutil.net_io_counters().bytes_sent\n    network = str(new_network - old_network)\n    memory = str(psutil.Process(os.getpid()).memory_info().rss / 1024 ** 2)\n    if config.gpu == True:\n        GPUs = GPUtil.getGPUs()\n        gpu = str(GPUs[0].load * 100)\n    else:\n        gpu = \"0\"\n    # log metrics\n    data = '{ library: pysyft; \\n accuracy: ' + accuracy + '; \\n loss: ' + total_loss + '; \\n recall: ' + recall + '; \\n precision: ' + precision + '; \\n f1: ' + fone + '; \\n time: ' + execution_time + '; \\n network: ' + network + '; \\n memory: ' + memory + '; \\n cpu: ' + cpu + '; \\n gpu: ' + gpu +\"; \\n }\"\n    print(data)\n    sys.stdout.flush()\n    \n# run\nbenchmark()\n\n", "repo_name": "sdn98/BFML", "sub_path": "server/fml/libraries/pysyft/image_classifier_cnn.py", "file_name": "image_classifier_cnn.py", "file_ext": "py", "file_size_in_byte": 4363, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "sys.path.insert", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "config.Config", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 19, "usage_type": "attribute"}, {"api_name": "config.dataset", "line_number": 22, "usage_type": "attribute"}, {"api_name": "models.CifarCnn", "line_number": 23, "usage_type": "call"}, {"api_name": "models.MnistCnn", "line_number": 25, "usage_type": "call"}, {"api_name": "config.dataset", "line_number": 27, "usage_type": "attribute"}, {"api_name": "data_loaders.CifarDataLoader", "line_number": 28, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "data_loaders.MnistDataLoader", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.optim.SGD", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 32, "usage_type": "name"}, {"api_name": "config.lr", "line_number": 32, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 33, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 33, "usage_type": "name"}, {"api_name": "syft.TorchHook", "line_number": 36, "usage_type": "call"}, {"api_name": "syft.VirtualWorker", "line_number": 37, "usage_type": "call"}, {"api_name": "syft.VirtualWorker", "line_number": 38, "usage_type": "call"}, {"api_name": "config.gpu", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.cuda.is_available", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 41, "usage_type": "attribute"}, {"api_name": "torch.manual_seed", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 43, "usage_type": "call"}, {"api_name": "syft.FederatedDataLoader", "line_number": 47,
"usage_type": "call"}, {"api_name": "config.batch_size", "line_number": 47, "usage_type": "attribute"}, {"api_name": "config.epochs", "line_number": 51, "usage_type": "attribute"}, {"api_name": "torch.no_grad", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 76, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 85, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 86, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", "line_number": 93, "usage_type": "call"}, {"api_name": "psutil.net_io_counters", "line_number": 94, "usage_type": "call"}, {"api_name": "psutil.cpu_percent", "line_number": 95, "usage_type": "call"}, {"api_name": "time.time", "line_number": 99, "usage_type": "call"}, {"api_name": "psutil.cpu_percent", "line_number": 100, "usage_type": "call"}, {"api_name": "psutil.net_io_counters", "line_number": 102, "usage_type": "call"}, {"api_name": "psutil.Process", "line_number": 104, "usage_type": "call"}, {"api_name": "os.getpid", "line_number": 104, "usage_type": "call"}, {"api_name": "config.gpu", "line_number": 105, "usage_type": "attribute"}, {"api_name": "GPUtil.getGPUs", "line_number": 106, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 113, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 113, "usage_type": "attribute"}]} {"seq_id": "16983924031", "text": "import itertools\nfrom _operator import mul\n\ndef transpose(data):\n return list(zip(*data))\n\nres = transpose([[1,2],[3,-1]])\nprint(res)\n\n\n\ndef transpose(it):\n return itertools.zip_longest(it[0], it[1])\n\n\ndef scalar_product(a, b):\n a1 = []\n a2 = []\n for i in a:\n if type(i) == int or type(i) == float:\n a1.append(i)\n elif type(i) == str:\n if i.isdigit():\n a1.append(float(i))\n else:\n return None\n else:\n return None\n for i in b:\n if type(i) == int or type(i) == float:\n a2.append(i)\n elif type(i) == str:\n if i.isdigit():\n a2.append(float(i))\n else:\n return None\n else:\n return None\n return sum(itertools.starmap(mul, itertools.zip_longest(a1, a2)))\n\n", "repo_name": "Ginix4ever/GitProgram", "sub_path": "Gitpython/3.1iter_helpers.py", "file_name": "3.1iter_helpers.py", "file_ext": "py", "file_size_in_byte": 859, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "itertools.zip_longest", "line_number": 13, "usage_type": "call"}, {"api_name": "itertools.starmap", "line_number": 39, "usage_type": "call"}, {"api_name": "_operator.mul", "line_number": 39, "usage_type": "argument"}, {"api_name": "itertools.zip_longest", "line_number": 39, "usage_type": "call"}]} {"seq_id": "10454801904", "text": "import numpy as np\nimport os\nimport torch\nimport torch.nn as nn\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\nfrom ptb_common import load_model, init_device, ptb_raw_data, ModelInfo, Batch, ptb_iterator, repackage_hidden\nfrom models import RNN, GRU\nfrom models import make_model as TRANSFORMER\nfrom sklearn.preprocessing import minmax_scale\n\n\ndef compute_grad_per_timestep(model, device, data, loss_fn):\n \"\"\"\n One epoch of training/validation (depending on flag is_train).\n \"\"\"\n model.eval()\n\n # LOOP THROUGH MINIBATCHES\n for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.seq_len)):\n\n hidden = model.init_hidden()\n hidden = hidden.to(device)\n\n 
inputs = torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous().to(device)\n        model.zero_grad()\n        hidden = repackage_hidden(hidden)\n        outputs, hidden = model(inputs, hidden)\n\n        targets = torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous().to(device)\n        # LOSS COMPUTATION\n        last_loss = loss_fn(outputs[-1], targets[-1])\n\n        # Compute gradient with respect to last loss. Average gradients in batch\n        grads = torch.empty(0).to(device)\n        for layer_states in model.state_history:\n            batch_grads = torch.autograd.grad(last_loss, layer_states, retain_graph=True)\n            avg_grads = [torch.mean(bg, 0) for bg in batch_grads]\n            # Gradients for each layer concatenated into a single vector\n            grads = torch.cat((grads, torch.stack(avg_grads)), dim=1)\n\n        return [s.norm() for s in grads]\n\n\ndef plot_loss_per_step(model_infos, grads_by_model):\n\n    results_folder = './5_results'\n    x = np.arange(35) + 1\n\n    for model_info, grads in zip(model_infos, grads_by_model):\n        grads = minmax_scale(grads)\n        plt.plot(x, grads, '-o', label=model_info.model)\n    plt.title(\"$\\\\nabla h_t L_T$ Gradient Norm vs. Timestep\")\n    plt.ylabel(\"Gradient Norm (Scaled)\")\n    plt.xlabel(\"Timestep\")\n    plt.legend()\n    file_name = 'grads_per_timestep.png'\n    plt.savefig(os.path.join(results_folder, file_name), bbox_inches='tight', pad_inches=0.2)\n    plt.clf()\n    plt.close()\n\n\ndef compute_grad_per_timestep_by_model():\n    device = init_device()\n    train_data, valid_data, test_data, word_to_id, id_2_word = ptb_raw_data(data_path='data')\n    vocab_size = len(word_to_id)\n    loss_fn = nn.CrossEntropyLoss()\n    # # Models from 4_1\n    model_infos = [ModelInfo('RNN', 'ADAM', 0.0001, 20, 35, 1500, 2, 0.35),\n                   ModelInfo('GRU', 'SGD_LR_SCHEDULE', 10, 20, 35, 1500, 2, 0.35)]\n\n    grads_by_model = []\n    for model_info in model_infos:\n        model = load_model(model_info, device, vocab_size)\n        model.track_state_history = True\n        grads_per_step = compute_grad_per_timestep(model, device, valid_data, loss_fn)\n        grads_by_model.append(grads_per_step)\n\n    # np.save('grads_by_model.npy', grads_by_model)\n    # grads_by_model = np.load('grads_by_model.npy')\n    plot_loss_per_step(model_infos, grads_by_model)\n\n\nif __name__ == '__main__':\n    print('5.2 - Gradient per timestep')\n    compute_grad_per_timestep_by_model()\n\n\n\n", "repo_name": "houdridi/IFT6135PracticalAssignments", "sub_path": "a2/5_2_grad_per_timestep.py", "file_name": "5_2_grad_per_timestep.py", "file_ext": "py", "file_size_in_byte": 3191, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "ptb_common.ptb_iterator", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 25, "usage_type": "attribute"}, {"api_name": "ptb_common.repackage_hidden", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.int64", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.empty", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 37, "usage_type": "attribute"}, {"api_name": "torch.mean", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 48,
"usage_type": "call"}, {"api_name": "sklearn.preprocessing.minmax_scale", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path", "line_number": 58, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "ptb_common.init_device", "line_number": 64, "usage_type": "call"}, {"api_name": "ptb_common.ptb_raw_data", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 67, "usage_type": "name"}, {"api_name": "ptb_common.ModelInfo", "line_number": 69, "usage_type": "call"}, {"api_name": "ptb_common.ModelInfo", "line_number": 70, "usage_type": "call"}, {"api_name": "ptb_common.load_model", "line_number": 74, "usage_type": "call"}]} {"seq_id": "32418162182", "text": "from matplotlib.figure import Figure\nfrom typing import List, Union, Dict, Callable, Tuple, Optional\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom PyQt5.QtWidgets import *\nfrom matplotlib.lines import Line2D\nfrom matplotlib.backend_bases import PickEvent, MouseEvent\nfrom hyperclass.util.config import tostr\nfrom PyQt5.QtCore import *\nfrom matplotlib.axes import Axes\nfrom collections import OrderedDict\nfrom hyperclass.data.events import dataEventHandler, DataType\nfrom hyperclass.gui.events import EventClient, EventMode\nfrom hyperclass.gui.labels import labelsManager\nimport xarray as xa\n\nclass Spectrum:\n def __init__(self, band_values: List[float], color: List[float], cid: int ):\n self.bands = band_values\n self.color = color\n self.cid = cid\n\n def isTransient(self):\n return self.cid == 0\n\nclass SpectralCanvas( FigureCanvas ):\n\n def __init__(self, figure: Figure ):\n FigureCanvas.__init__( self, figure )\n self.figure = figure\n self.figure.patch.set_facecolor('#e2e2e2')\n\nclass SpectralPlot(QObject,EventClient):\n update_signal = pyqtSignal()\n\n def __init__( self, active: bool = True, **kwargs ):\n QObject.__init__(self)\n self.figure: Optional[Figure] = None\n self._active = active\n self.overlay = kwargs.get('overlay', False )\n self.axes: Optional[Axes] = None\n self.lines: OrderedDict[ int, Line2D ] = OrderedDict()\n self.current_line: 
Optional[Line2D] = None\n self.current_pid = -1\n self.current_cid = -1\n self.norm = None\n\n self.plotx: xa.DataArray = None\n self.nploty: xa.DataArray = None\n self.ploty: xa.DataArray = None\n\n self.rplotx: xa.DataArray = None\n self.rploty: xa.DataArray = None\n\n self._use_reduced_data = False\n self.marker: Line2D = None\n self._gui = None\n self._titles = None\n self.parms = kwargs\n self.update_signal.connect( self.update )\n\n def useReducedData(self, useReducedData: bool ):\n if self._use_reduced_data != useReducedData:\n self._use_reduced_data = useReducedData\n self.plot_spectrum()\n self.update()\n\n def toggleUseReducedData( self ):\n self._use_reduced_data = not self._use_reduced_data\n self.plot_spectrum()\n self.update()\n\n def activate( self, active: bool ):\n self._active = active\n if self._active and (self.current_pid >= 0):\n event = dict( event=\"pick\", type=\"graph\", pids=[self.current_pid], cid=0 )\n self.submitEvent(event, EventMode.Gui)\n\n def init( self ):\n self.figure = Figure(constrained_layout=True)\n self.axes = self.figure.add_subplot(111)\n self.axes.title.set_fontsize(14)\n self.activate_event_listening()\n\n def configure(self, event: Dict ):\n type = self.ploty.attrs.get('type')\n self.axes.set_facecolor((0.0, 0.0, 0.0))\n if type == 'spectra':\n plot_metadata = dataEventHandler.getMetadata( event )\n self._titles = {}\n for index in range( plot_metadata[0].shape[0] ):\n self._titles[index] = \"[\" + \",\".join( [ tostr(pm.values[index]) for pm in plot_metadata ] ) + \"]\"\n else:\n self.figure.patch.set_facecolor( (0.0, 0.0, 0.0) )\n self.axes.axis('off')\n self.axes.get_yaxis().set_visible(False)\n self.figure.set_constrained_layout_pads( w_pad=0., h_pad=0. )\n\n def gui(self, parent) :\n if self._gui is None:\n self.init( )\n self._gui = SpectralCanvas( self.figure )\n self._gui.setParent(parent)\n self._gui.setSizePolicy( QSizePolicy.Expanding, QSizePolicy.Expanding)\n self._gui.setContentsMargins( 0, 0, 0, 0 )\n self._gui.updateGeometry()\n self._gui.mpl_connect('button_press_event', self.mouseClick)\n\n return self._gui\n\n def mouseClick(self, event: MouseEvent):\n if (self.axes is not None) and ( self.current_pid >= 0 ) and ( self.ploty is not None ) and self._active:\n print(f\"SpectralPlot.mousePressEvent: [{event.x}, {event.y}] -> [{event.xdata}, {event.ydata}]\" )\n title = f\" {event.xdata:.2f}: {event.ydata:.3f} \"\n self.axes.set_title( title, {'fontsize': 10 }, 'right' )\n self.update_marker( event.xdata )\n self.update()\n\n def normalize(self):\n self.norm = self.ploty.attrs.get(\"norm\", None)\n if self.norm == \"median\":\n self.nploty = self.ploty / self.ploty.median( axis = 1 )\n elif self.norm == \"mean\":\n self.nploty = self.ploty / self.ploty.mean( axis=1 )\n else:\n self.nploty = self.ploty\n\n def processEvent(self, event: Dict ):\n super().processEvent(event)\n if dataEventHandler.isDataLoadEvent(event):\n plot_data = dataEventHandler.getPointData( event, DataType.Plot )\n# reduced_data = dataEventHandler.getPointData( event, DataType.Embedding )\n if isinstance(plot_data, dict): self.plotx, self.ploty = plot_data[\"plotx\"], plot_data[\"ploty\"]\n else: self.plotx, self.ploty = plot_data.band, plot_data\n# self.rplotx, self.rploty = reduced_data['model'], reduced_data\n if self.ploty.size > 0:\n self.normalize()\n self.configure( event )\n if event.get('event') == 'pick':\n if (event.get('type') in [ 'vtkpoint', 'directory', 'reference', 'plot' ]) and self._active:\n if self.ploty is not None:\n pids = [ row.pid for 
row in event.get('rows',[]) ]\n pids = pids + event.get('pids',[])\n for pid in pids:\n if pid >= 0:\n self.current_pid = pid\n current_line = self.lines.get( self.current_pid, None )\n if (current_line is not None) and (current_line.cid > 0):\n self.current_cid = current_line.cid\n else:\n classification = event.get('classification',-1)\n self.current_cid = classification if (classification > 0) else labelsManager.selectedClass\n self.clear_transients()\n print( f\"SpectralPlot: pick event, pid = {self.current_pid}, cid = {self.current_cid}\")\n self.plot_spectrum()\n if self._titles is not None:\n self.axes.set_title( self._titles.get(self.current_pid,\"*SPECTRA*\" ), {'fontsize': 10 }, 'center' )\n self.update_marker()\n self.axes.set_title( \"\", {}, 'right' )\n self.update_signal.emit()\n break\n elif event.get('event') == 'gui':\n if event.get('type') =='reset':\n self.clear()\n\n def update_marker(self, new_xval = None ):\n if self.marker is not None:\n self.axes.lines.remove(self.marker)\n self.marker = None\n if new_xval is not None:\n self.marker = self.axes.axvline( new_xval, color=\"yellow\", linewidth=1, alpha=0.75 )\n\n def plot_spectrum(self):\n if (self.current_pid >= 0) and (self.nploty is not None):\n color = labelsManager.colors[self.current_cid]\n\n # if self._use_reduced_data:\n # spectrum = self.rploty[self.current_pid].values\n # x = self.rplotx[ self.current_pid ].values if self.rplotx.ndim == 2 else self.rplotx.values\n # else:\n # spectrum = self.nploty[self.current_pid].values\n # x = self.plotx[ self.current_pid ].values if self.plotx.ndim == 2 else self.plotx.values\n\n spectrum = self.nploty[self.current_pid].values\n x = self.plotx[self.current_pid].values if self.plotx.ndim == 2 else self.plotx.values\n self.ymax, self.ymin = spectrum.max(), spectrum.min()\n self.xmax, self.xmin = x.max(), x.min()\n self.axes.set_ylim(self.ymin, self.ymax)\n self.axes.set_xlim(self.xmin, self.xmax)\n linewidth = 2 if self.overlay else 1\n if len(color) == 4: color[3] = 1.0\n if self.current_line is not None:\n self.current_line.set_visible(False)\n self.current_line, = self.axes.plot( x, spectrum, linewidth=linewidth, color=color )\n print( f\"SPECTRA BOUNDS: [ {self.xmin:.2f}, {self.xmax:.2f} ] -> [ {self.ymin:.2f}, {self.ymax:.2f} ]\")\n self.current_line.color = color\n# self.current_line.mark( self.current_cid )\n self.current_line.cid = self.current_cid\n self.lines[ self.current_pid ] = self.current_line\n\n def clear(self):\n self.lines = OrderedDict()\n self.current_line = None\n self.axes.clear()\n\n def clear_transients(self):\n if (self.current_line is not None):\n if (self.current_line.cid == 0) or not self.overlay:\n index, line = self.lines.popitem()\n line.remove()\n self.current_line = None\n else:\n self.current_line.set_linewidth(1)\n\n def remove_spectrum(self, index: int ):\n line: Line2D = self.lines[ index ]\n line.remove()\n del self.lines[ index ]\n\n def has_spectrum(self, index: int ):\n return index in self.lines\n\n @pyqtSlot()\n def update(self):\n if self._gui is not None:\n self.figure.canvas.draw_idle()\n self._gui.update()\n\n\n\nclass SpectralManager:\n\n def __init__(self):\n self.spectral_plots = []\n self._gui = None\n\n def gui(self, nSpectra: int, parent: QWidget ):\n if self._gui is None:\n self._gui = QTabWidget()\n for iS in range(nSpectra):\n spectral_plot = SpectralPlot(iS == 0)\n self.spectral_plots.append(spectral_plot)\n tabId = \"Spectra\" if iS == 0 else str(iS)\n self._gui.addTab( spectral_plot.gui(parent), tabId )\n 
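# changing the active tab activates the selected spectral plot and deactivates the others\n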
self._gui.currentChanged.connect(self.activate_spectral_plot)\n self._gui.setTabEnabled(0, True)\n return self._gui\n\n def activate_spectral_plot( self, index: int ):\n for iS, plot in enumerate(self.spectral_plots):\n plot.activate( iS == index )\n\n def setSpectralUseReduced(self, useReducedData: bool ):\n for spectral_plot in self.spectral_plots:\n spectral_plot.useReducedData( useReducedData )\n\n def toggleSpectralUseReduced(self ):\n for spectral_plot in self.spectral_plots:\n spectral_plot.toggleUseReducedData()\n\n def addActions(self, menu: QMenu ):\n menuButton = QAction( \"Toggle Spectral Reduced/Raw\", self._gui )\n menuButton.setStatusTip( \"Toggle Spectral Use Reduced/Raw Data\" )\n menuButton.triggered.connect(self.toggleSpectralUseReduced)\n menu.addAction( menuButton )\n\nspectralManager = SpectralManager()", "repo_name": "nasa-nccs-cds/hyperclass", "sub_path": "hyperclass/plot/spectra.py", "file_name": "spectra.py", "file_ext": "py", "file_size_in_byte": 11245, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "47", "api": [{"api_name": "typing.List", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.figure.Figure", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 28, "usage_type": "name"}, {"api_name": "hyperclass.gui.events.EventClient", "line_number": 32, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.figure.Figure", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.axes.Axes", "line_number": 40, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 41, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 41, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 42, "usage_type": "name"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 42, "usage_type": "name"}, {"api_name": "xarray.DataArray", "line_number": 47, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 48, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 49, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 51, "usage_type": "attribute"}, {"api_name": "xarray.DataArray", "line_number": 52, "usage_type": "attribute"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 55, "usage_type": "name"}, {"api_name": "hyperclass.gui.events.EventMode.Gui", "line_number": 76, "usage_type": "attribute"}, {"api_name": "hyperclass.gui.events.EventMode", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.figure.Figure", "line_number": 79, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 84, "usage_type": "name"}, {"api_name": "hyperclass.data.events.dataEventHandler.getMetadata", "line_number": 88, "usage_type": "call"}, {"api_name": "hyperclass.data.events.dataEventHandler", "line_number": 88, "usage_type": "name"}, {"api_name": "hyperclass.util.config.tostr", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.backend_bases.MouseEvent", "line_number": 110, "usage_type": "name"}, 
{"api_name": "typing.Dict", "line_number": 127, "usage_type": "name"}, {"api_name": "hyperclass.data.events.dataEventHandler.isDataLoadEvent", "line_number": 129, "usage_type": "call"}, {"api_name": "hyperclass.data.events.dataEventHandler", "line_number": 129, "usage_type": "name"}, {"api_name": "hyperclass.data.events.dataEventHandler.getPointData", "line_number": 130, "usage_type": "call"}, {"api_name": "hyperclass.data.events.dataEventHandler", "line_number": 130, "usage_type": "name"}, {"api_name": "hyperclass.data.events.DataType.Plot", "line_number": 130, "usage_type": "attribute"}, {"api_name": "hyperclass.data.events.DataType", "line_number": 130, "usage_type": "name"}, {"api_name": "hyperclass.gui.labels.labelsManager.selectedClass", "line_number": 151, "usage_type": "attribute"}, {"api_name": "hyperclass.gui.labels.labelsManager", "line_number": 151, "usage_type": "name"}, {"api_name": "hyperclass.gui.labels.labelsManager.colors", "line_number": 174, "usage_type": "attribute"}, {"api_name": "hyperclass.gui.labels.labelsManager", "line_number": 174, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 201, "usage_type": "call"}, {"api_name": "matplotlib.lines.Line2D", "line_number": 215, "usage_type": "name"}]} {"seq_id": "14821045818", "text": "import logging\nimport socket\nfrom wsgiref.util import request_uri\n\nfrom pyramid.threadlocal import get_current_request\n\n\n__all__ = ('ContextFilter', 'EnvironFilter')\n\n\nclass ContextFilter(logging.Filter):\n \"\"\"Provides context specific filter values.\n See also https://docs.python.org/3/library/logging.html#filter-objects\n \"\"\"\n hostname = socket.gethostname()\n\n def filter(self, record):\n record.hostname = ContextFilter.hostname\n return True\n\n\nclass EnvironFilter(logging.Filter):\n \"\"\"Exposes the request ``environ`` to the logger.\"\"\"\n\n def _get_defaults(self, environ):\n if environ.get('HTTP_X_FORWARDED_FOR'):\n remote_addr = environ['HTTP_X_FORWARDED_FOR']\n elif environ.get('REMOTE_ADDR'):\n remote_addr = environ['REMOTE_ADDR']\n uri = environ.get('REQUEST_URI', None)\n if uri is None:\n uri = request_uri(environ)\n defaults = {\n 'REMOTE_ADDR': remote_addr,\n 'REMOTE_USER': environ.get('REMOTE_USER') or '-',\n 'REQUEST_URI': uri,\n 'HTTP_VERSION': environ.get('SERVER_PROTOCOL'),\n 'HTTP_REFERER': environ.get('HTTP_REFERER', '-'),\n 'HTTP_USER_AGENT': environ.get('HTTP_USER_AGENT', '-'),\n }\n return defaults\n\n def filter(self, record):\n request = get_current_request()\n # Apply a set of default values to the record.\n for key, value in self._get_defaults(request.environ).items():\n setattr(record, key, value)\n # Apply actual values to the record.\n for key, value in request.environ.items():\n setattr(record, key, value)\n\n # Set the status and byte length to the record.\n response = request.response\n bytes = '-'\n for name, value in response.headers.items():\n if name.lower() == 'content-length':\n bytes = value\n break\n record.bytes = bytes\n record.status = response.status\n\n return True\n", "repo_name": "openstax/pyramid_sawing", "sub_path": "pyramid_sawing/filters.py", "file_name": "filters.py", "file_ext": "py", "file_size_in_byte": 2026, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "47", "api": [{"api_name": "logging.Filter", "line_number": 11, "usage_type": "attribute"}, {"api_name": "socket.gethostname", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.Filter", "line_number": 22, 
"usage_type": "attribute"}, {"api_name": "wsgiref.util.request_uri", "line_number": 32, "usage_type": "call"}, {"api_name": "pyramid.threadlocal.get_current_request", "line_number": 44, "usage_type": "call"}]} {"seq_id": "11478551349", "text": "'''\nDescription: \nAutor: didiplus\nDate: 2023-02-18 13:39:29\nLastEditors: lin\nLastEditTime: 2023-02-25 09:20:34\n'''\n\n\nfrom aioredis import Redis\nfrom typing import Union,Optional\nfrom config import settings\nclass RedisTools(Redis):\n\n def __init__(self):\n super().__init__(\n host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n password=settings.REDIS_PASSWORD,\n db=settings.REDIS_DATABASE,\n socket_timeout=settings.REDIS_TIMEOUT\n )\n\n async def is_existsKey(self,key:str)->bool:\n \n return True if await self.get(name=key) is not None else False\n \n async def hasKey(self,key:str) ->bool:\n return await self.is_existsKey(key)\n\n async def set_value(self,key:str,value:str,ex=7200)->None:\n await self.set(name=key,value=value,ex=ex)\n \n\n async def get_value(self,key:str) ->str:\n res = await self.get(name=key)\n return str(res,encoding=\"utf-8\")", "repo_name": "likeUtaoki/vx_chatgpt", "sub_path": "core/redis.py", "file_name": "redis.py", "file_ext": "py", "file_size_in_byte": 976, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "47", "api": [{"api_name": "aioredis.Redis", "line_number": 13, "usage_type": "name"}, {"api_name": "config.settings.REDIS_HOST", "line_number": 17, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 17, "usage_type": "name"}, {"api_name": "config.settings.REDIS_PORT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 18, "usage_type": "name"}, {"api_name": "config.settings.REDIS_PASSWORD", "line_number": 19, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 19, "usage_type": "name"}, {"api_name": "config.settings.REDIS_DATABASE", "line_number": 20, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 20, "usage_type": "name"}, {"api_name": "config.settings.REDIS_TIMEOUT", "line_number": 21, "usage_type": "attribute"}, {"api_name": "config.settings", "line_number": 21, "usage_type": "name"}]} {"seq_id": "72022118544", "text": "from flask import render_template, session, redirect, url_for, flash, jsonify, request, Response\nfrom datetime import datetime, timedelta, date\nfrom . import main\nfrom .. 
import db\nfrom ..models import Update\nfrom .utils import *\n\n@main.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@main.route(\"/receiver\", methods=[\"GET\", \"POST\"])\ndef receiver():\n data = request.get_json(force = True)\n td = getdate()\n savedate = date(day=td.day, month=td.month, year=td.year)\n update = Update(user = data['user'], count = data['count'], date= savedate)\n db.session.add(update)\n db.session.commit()\n response = Response(status=200)\n return response\n\n@main.route(\"/api/getuserdata/\")\ndef getuserdata(user):\n data = Update.query.filter_by(user=user).all()\n data = [i.json() for i in data]\n return jsonify(data)\n\n@main.route(\"/api/getusers\")\ndef getusers():\n data = Update.query.with_entities(Update.user).distinct().all()\n data = [dat.user for dat in data]\n return jsonify(data)\n\n@main.route(\"/api/getdates\")\ndef getdates():\n data = Update.query.with_entities(Update.date).distinct().all()\n data = [str(dat.date) for dat in data]\n return jsonify(data)\n\n@main.route(\"/api/getdata\")\ndef getdata():\n data = Update.query.with_entities(Update.user).distinct().all()\n resp = {}\n for dat in data :\n user = dat.user\n content = Update.query.filter_by(user=user).all()\n resp[user] = [{'x':str(c.date),'y':c.count} for c in content]\n return jsonify(resp)\n\n\ndef getdate():\n time = timedelta(hours=5, minutes=30)\n date = datetime.utcnow()+time\n return date", "repo_name": "DivyanshK12/plotty-public", "sub_path": "app/main/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1646, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "flask.render_template", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 16, "usage_type": "call"}, {"api_name": "models.Update", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 20, "usage_type": "call"}, {"api_name": "models.Update.query.filter_by", "line_number": 25, "usage_type": "call"}, {"api_name": "models.Update.query", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.Update", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 27, "usage_type": "call"}, {"api_name": "models.Update.query.with_entities", "line_number": 31, "usage_type": "call"}, {"api_name": "models.Update.query", "line_number": 31, "usage_type": "attribute"}, {"api_name": "models.Update", "line_number": 31, "usage_type": "name"}, {"api_name": "models.Update.user", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "models.Update.query.with_entities", "line_number": 37, "usage_type": "call"}, {"api_name": "models.Update.query", "line_number": 37, "usage_type": "attribute"}, {"api_name": "models.Update", "line_number": 37, "usage_type": "name"}, {"api_name": "models.Update.date", "line_number": 37, "usage_type": "attribute"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Update.query.with_entities", "line_number": 43, "usage_type": "call"}, {"api_name": "models.Update.query", "line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Update", "line_number": 43, "usage_type": "name"}, {"api_name": "models.Update.user", 
"line_number": 43, "usage_type": "attribute"}, {"api_name": "models.Update.query.filter_by", "line_number": 47, "usage_type": "call"}, {"api_name": "models.Update.query", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.Update", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 55, "usage_type": "name"}]} {"seq_id": "8090143957", "text": "from django.shortcuts import render\nfrom django.apps import apps\n\nArticleModel = apps.get_model('article', 'Article')\n\n# Create your views here.\ndef homepage_view(request, *arg, **kwargs):\n\n context = {\n \"articleCollection\": ArticleModel.objects.all(),\n }\n return render(request, 'home.html', context)\n", "repo_name": "NikhilCodes/Django-News", "sub_path": "pages/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 318, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "47", "api": [{"api_name": "django.apps.apps.get_model", "line_number": 4, "usage_type": "call"}, {"api_name": "django.apps.apps", "line_number": 4, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}]} {"seq_id": "28241062996", "text": "import json\nimport re\nimport os\nimport random\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\n\nclass ParserKiller:\n def __init__(self, sentence):\n self.sentence = sentence\n\n def parse_sentence(self):\n \"\"\"\n Keep only the letters and delete all common word\n :return:\n All the work we keep\n \"\"\"\n self.sentence = re.sub(r'[^\\w\\s]', ' ', self.sentence)\n self.sentence = self.sentence.lower()\n list_of_word = self.sentence.split(\" \")\n keep_word = []\n\n with open(os.path.join(dir_path, 'fr.json'), encoding='utf-8') as json_data:\n data_dict = json.load(json_data)\n for word in list_of_word:\n if word not in data_dict:\n keep_word.append(word)\n\n return ' '.join(keep_word)\n\n\ndef select_response(status):\n \"\"\"\n Get randomly an answer in repsponse.json depending on the status\n\n :param status:\n :return:\n response, random_choice\n \"\"\"\n with open(os.path.join(dir_path, 'response.json'), encoding='utf-8') as json_file:\n data = json.load(json_file)\n chosen_status = data[status]\n random_choice = random.randrange(len(chosen_status))\n response = chosen_status[random_choice]\n\n return response, random_choice\n\n\ndef compact_answer(text):\n \"\"\"\n Using a regular expression to get the first sentences of a text\n :param text:\n :return:\n The first sentences\n \"\"\"\n sentences = re.split(r'(?<=[^A-Z].[.?]) +(?=[A-Z])', text)\n return sentences[0].splitlines()[0]\n", "repo_name": "M0l42/OC_P7_GrandpyBot", "sub_path": "grandpybot/chat_bot.py", "file_name": "chat_bot.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "47", "api": [{"api_name": "os.path.dirname", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path", "line_number": 6, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 6, "usage_type": "call"}, 
{"api_name": "re.sub", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 42, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 44, "usage_type": "call"}, {"api_name": "re.split", "line_number": 57, "usage_type": "call"}]} {"seq_id": "22676233024", "text": "from rest_framework import serializers\nfrom rest_framework.serializers import Serializer\nfrom djoser.serializers import UserSerializer\n\nfrom apps.adminka.models import News\nfrom apps.payments.serializers import UserServicePlanPaymentSerializer\nfrom apps.user.models import User\n\n\nclass NewsSerializer(serializers.ModelSerializer):\n admin = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n class Meta:\n model = News\n fields = ('id','title', 'body', 'is_published', 'admin', 'publish_date')\n\n\nclass AdminUserList(serializers.ModelSerializer):\n user_type = serializers.SerializerMethodField()\n fio_company_name = serializers.SerializerMethodField()\n iin_bin = serializers.SerializerMethodField()\n active_payment = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = ('user_type',\n 'id',\n 'fio_company_name',\n 'phone_number',\n 'email',\n 'iin_bin',\n 'is_active',\n 'spent_balance',\n 'date_joined',\n 'active_payment')\n\n def get_user_type(self, obj):\n if hasattr(obj, 'individual_user'):\n return 'individual_user'\n if hasattr(obj, 'juridical_user'):\n return 'juridical_user'\n\n def get_active_payment(self, obj):\n if hasattr(obj, 'service_plan_payments'):\n last_payment = obj.service_plan_payments.order_by('created').last()\n return UserServicePlanPaymentSerializer(last_payment).data\n\n def get_fio_company_name(self, obj):\n if hasattr(obj, 'individual_user'):\n return obj.fio\n if hasattr(obj, 'juridical_user'):\n return obj.juridical_user.company_name\n\n def get_iin_bin(self, obj):\n if hasattr(obj, 'individual_user'):\n return obj.individual_user.iin\n if hasattr(obj, 'juridical_user'):\n return obj.juridical_user.bin\n\n\nclass AdminDetailUserSerializer(UserSerializer):\n payment_log = serializers.SerializerMethodField()\n user_type = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = tuple(User.REQUIRED_FIELDS) + (\n 'id',\n 'email',\n 'balance',\n 'spent_balance',\n 'is_admin',\n 'date_joined',\n 'is_active',\n 'payment_log',\n 'user_type'\n )\n\n def get_payment_log(self, obj):\n if hasattr(obj, 'payment_log'):\n success_payment_log_list = obj.payment_log.filter(status='success').values()\n if len(success_payment_log_list) > 0:\n return success_payment_log_list[0]\n return None\n\n def get_user_type(self, obj):\n if hasattr(obj, 'juridical_user'):\n return 'juridical'\n if hasattr(obj, 'individual_user'):\n return 'individual'\n\n\nclass UpdateUserStatus(Serializer):\n is_active = serializers.BooleanField(default=True)\n", "repo_name": "dmk-one/analytics_assistant_demo", "sub_path": "apps/adminka/serializers.py", "file_name": "serializers.py", "file_ext": "py", "file_size_in_byte": 3039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": 
"rest_framework.serializers.ModelSerializer", "line_number": 10, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 10, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HiddenField", "line_number": 11, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 11, "usage_type": "name"}, {"api_name": "rest_framework.serializers.CurrentUserDefault", "line_number": 11, "usage_type": "call"}, {"api_name": "apps.adminka.models.News", "line_number": 14, "usage_type": "name"}, {"api_name": "rest_framework.serializers.ModelSerializer", "line_number": 18, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 18, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 19, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 19, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 20, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 20, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 22, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "apps.user.models.User", "line_number": 25, "usage_type": "name"}, {"api_name": "apps.payments.serializers.UserServicePlanPaymentSerializer", "line_number": 46, "usage_type": "call"}, {"api_name": "djoser.serializers.UserSerializer", "line_number": 61, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 62, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 62, "usage_type": "name"}, {"api_name": "rest_framework.serializers.SerializerMethodField", "line_number": 63, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 63, "usage_type": "name"}, {"api_name": "apps.user.models.User", "line_number": 66, "usage_type": "name"}, {"api_name": "apps.user.models.User.REQUIRED_FIELDS", "line_number": 67, "usage_type": "attribute"}, {"api_name": "apps.user.models.User", "line_number": 67, "usage_type": "name"}, {"api_name": "rest_framework.serializers.Serializer", "line_number": 93, "usage_type": "name"}, {"api_name": "rest_framework.serializers.BooleanField", "line_number": 94, "usage_type": "call"}, {"api_name": "rest_framework.serializers", "line_number": 94, "usage_type": "name"}]} {"seq_id": "17877259728", "text": "from socket import socket, AF_INET, SOCK_DGRAM\r\nimport argparse\r\nimport UDPClient, UDPServer\r\n\r\ndef is_valid_ipv4_address(address):\r\n if address.lower() == \"localhost\":\r\n return True\r\n\r\n # Split the string into four parts separated by dots\r\n parts = address.split('.')\r\n if len(parts) != 4:\r\n return False\r\n\r\n # Check that each part is an integer between 0 and 255\r\n for part in parts:\r\n try:\r\n num = int(part)\r\n except ValueError:\r\n return False\r\n if num < 0 or num > 255:\r\n return False\r\n\r\n return True\r\n\r\n\r\n#will eventually have to start the app using ChatApp -c \r\nif __name__ == '__main__':\r\n\r\n parser = argparse.ArgumentParser(description='ChatApp')\r\n \r\n # Client Arguments\r\n \r\n parser.add_argument('-c', '--clientName', 
type=str, help='Name of the client')\r\n    parser.add_argument('args', metavar='arg', type=str, nargs='*', help='Command line arguments')\r\n\r\n    # Server Arguments\r\n    parser.add_argument('-s', '--server', action='store_true', help='Run as a server')\r\n\r\n    args = parser.parse_args()\r\n\r\n    # Now you can access the values of the arguments as follows:\r\n\r\n    # Check if the program is running as a server or client\r\n    if args.server:\r\n        # The program is running as a server\r\n        if len(args.args) != 1:\r\n            raise Exception(\"Input should be: ChatApp.py -s <server-port>\")\r\n\r\n        serverPort = int(args.args[0])\r\n        if serverPort < 1024 or serverPort > 65535:\r\n            raise Exception(\"Should be a port between 1024 and 65535\")\r\n\r\n        UDPServer.serverMode(serverPort)\r\n    else:\r\n        # The program is running as a client \r\n        \r\n        if len(args.args) != 3:\r\n            raise Exception(\"Input should be: ChatApp.py -c <name> <server-ip> <server-port> <client-port>\")\r\n        \r\n        clientName = args.clientName\r\n        clientIP = args.args[0]\r\n        \r\n        if not is_valid_ipv4_address(clientIP):\r\n            raise Exception(\"Should be a valid IP address\")\r\n\r\n        serverPort = int(args.args[1])\r\n        clientPort = int(args.args[2])\r\n\r\n        if clientPort < 1024 or clientPort > 65535 or serverPort < 1024 or serverPort > 65535:\r\n            raise Exception(\"Should be a port between 1024 and 65535\")\r\n\r\n        UDPClient.clientMode(clientName, clientIP, serverPort, clientPort)\r\n    ", "repo_name": "MatthewStridiron/UDP-Networking-Chat-App", "sub_path": "ChatApp.py", "file_name": "ChatApp.py", "file_ext": "py", "file_size_in_byte": 2382, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 29, "usage_type": "call"}, {"api_name": "UDPServer.serverMode", "line_number": 53, "usage_type": "call"}, {"api_name": "UDPClient.clientMode", "line_number": 72, "usage_type": "call"}]} {"seq_id": "40727214469", "text": "import os\nimport numpy as np\nimport gym\n\nfrom wgcsl.common import logger\nfrom wgcsl.algo.wgcsl import WGCSL\nfrom wgcsl.algo.supervised_sampler import make_sample_transitions, make_random_sample\nfrom wgcsl.common.monitor import Monitor\nfrom wgcsl.envs.multi_world_wrapper import PointGoalWrapper, SawyerGoalWrapper, ReacherGoalWrapper, FetchGoalWrapper\n\n# offline parameters\nDEFAULT_ENV_PARAMS = {\n    'Point2DLargeEnv-v1':{\n        'n_cycles':5,\n        'n_batches': 1,\n        'baw_delta': 0.15,\n    },\n    'Point2D-FourRoom-v1':{\n        'n_cycles':5,\n        'n_batches': 1,\n        'baw_delta': 0.15,\n    },\n    'SawyerReachXYZEnv-v1':{\n        'n_cycles':5,\n        'n_batches': 5, \n        'baw_delta': 0.15,\n        'num_epoch':100, \n    },\n    'FetchReach-v1': {\n        'n_cycles': 5, \n        'n_batches': 5, \n        'baw_delta': 0.15,\n        'num_epoch':100, \n    },\n    'Reacher-v2': {\n        'n_cycles': 10, \n        'n_batches': 10,\n        'baw_delta': 0.15,\n        'num_epoch':200, \n    },\n    'SawyerDoor-v0':{\n        'n_cycles': 10, \n        'n_batches': 10, \n        'baw_delta': 0.15,\n        'num_epoch':200, \n    },\n    'FetchPush-v1':{\n        'batch_size': 512,\n        'n_cycles': 20, \n        'n_batches': 20, \n        'baw_delta': 0.01,\n        'num_epoch':100, \n    },\n    'FetchSlide-v1':{\n        'batch_size': 512, \n        'n_cycles': 20, \n        'n_batches': 20, \n        'baw_delta': 0.01,\n        'num_epoch':100, \n    },\n    'FetchPickAndPlace-v1':{\n        'batch_size': 512,\n        'n_cycles': 20,\n        'n_batches': 20,\n        'baw_delta': 0.01,\n        'num_epoch':100,\n    },\n    'HandReach-v0':{\n        'batch_size': 512,\n        'n_cycles': 20, \n        'n_batches': 20, \n        'baw_delta': 0.01,\n        'num_epoch':100, \n    }\n}\n\n\nDEFAULT_PARAMS = { \n    # env\n    'max_u': 1.,  # max absolute value of actions on each coordinate\n    'layers': 3,  # 
number of layers in the critic/actor networks\n    'hidden': 256,  # number of neurons in each hidden layer\n    'network_class': 'wgcsl.algo.actor_critic:ActorCritic',\n    'Q_lr': 5e-4,  # critic learning rate\n    'pi_lr': 5e-4,  # actor learning rate\n    'buffer_size': int(1E6),  # for experience replay\n    'polyak': 0.9,  #polyak averaging coefficient\n    'action_l2': 1.0,  # quadratic penalty on actions (before rescaling by max_u)\n    'clip_obs': 200.,\n    'scope': 'wgcsl',\n    'relative_goals': False,\n    # training\n    'num_epoch':50, \n    'n_cycles': 10,  # per epoch\n    'rollout_batch_size': 1,  # per mpi thread\n    'n_batches': 4,  # training batches per cycle\n    'batch_size': 128,  # per mpi thread, measured in transitions and reduced to even multiple of chunk_length.\n    'n_test_rollouts': 100,  # number of test rollouts per epoch, each consists of rollout_batch_size rollouts\n    'test_with_polyak': False,  # run test episodes with the target network\n    # exploration, not used in the offline setting\n    'random_eps': 0.3,  # percentage of time a random action is taken\n    'noise_eps': 0.2,  # std of gaussian noise added to not-completely-random actions as a percentage of max_u\n    # random init episode, not used in the offline setting\n    'random_init':20,\n\n    # goal relabeling\n    'replay_strategy': 'future', \n    'replay_k': 4,  # number of additional goals used for replay\n    # normalization\n    'norm_eps': 1e-4,  # epsilon used for observation normalization\n    'norm_clip': 5,  # normalized observations are cropped to this value\n    # use supervised\n    'use_supervised': False,\n    # best-advantage weight\n    'baw_delta': 0.1,\n    'baw_max': 80,\n\n    # if do not use her\n    'no_relabel':False  # used for no relabel\n}\n\n\nCACHED_ENVS = {}\n\n\ndef cached_make_env(make_env):\n    \"\"\"\n    Only creates a new environment from the provided function if one has not yet already been\n    created. This is useful here because we need to infer certain properties of the env, e.g.\n    its observation and action spaces, without any intent of actually using it.\n    \"\"\"\n    if make_env not in CACHED_ENVS:\n        env = make_env()\n        CACHED_ENVS[make_env] = env\n    return CACHED_ENVS[make_env]\n\ndef prepare_mode(kwargs):\n    if 'mode' in kwargs.keys():\n        mode = kwargs['mode']\n        if mode == 'supervised':\n            kwargs['use_supervised'] = True\n        else:\n            kwargs['use_supervised'] = False\n    else:\n        kwargs['use_supervised'] = False\n    return kwargs\n\n\ndef prepare_params(kwargs):\n    # default max episode steps\n    kwargs = prepare_mode(kwargs)\n    default_max_episode_steps = 50\n    # WGCSL params\n    wgcsl_params = dict()\n    env_name = kwargs['env_name']\n    def make_env(subrank=None):\n        try:\n            env = gym.make(env_name, reward_type='sparse') \n        except:\n            logger.log('Can not make sparse reward environment')\n            env = gym.make(env_name)\n        # add wrapper for multiworld environment\n        if env_name.startswith('Fetch'):\n            env._max_episode_steps = 50\n            env = FetchGoalWrapper(env)\n        elif env_name.startswith('HandManipulate'):\n            env._max_episode_steps = 100\n        elif env_name.startswith('Point'):\n            env = PointGoalWrapper(env)\n            env.env._max_episode_steps = 50\n        elif env_name.startswith('Sawyer'): \n            env = SawyerGoalWrapper(env)\n        elif env_name.startswith('Reacher'):\n            env = ReacherGoalWrapper(env)\n\n        if (subrank is not None and logger.get_dir() is not None):\n            try:\n                from mpi4py import MPI\n                mpi_rank = MPI.COMM_WORLD.Get_rank()\n            except ImportError:\n                MPI = None\n                mpi_rank = 0\n                logger.warn('Running with a single MPI process. 
This should work, but the results may differ from the ones published in Plappert et al.')\n\n            if hasattr(env, '_max_episode_steps'):\n                max_episode_steps = env._max_episode_steps\n            else:\n                max_episode_steps = default_max_episode_steps  # otherwise use default max episode steps\n            env = Monitor(env,\n                          os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(subrank)),\n                          allow_early_resets=True)\n            # hack to re-expose _max_episode_steps (ideally should replace reliance on it downstream)\n            env = gym.wrappers.TimeLimit(env, max_episode_steps=max_episode_steps)\n        return env\n\n    kwargs['make_env'] = make_env\n    tmp_env = cached_make_env(kwargs['make_env'])\n    if hasattr(tmp_env, '_max_episode_steps'):\n        kwargs['T'] = tmp_env._max_episode_steps\n    else:\n        kwargs['T'] = default_max_episode_steps\n\n    kwargs['max_u'] = np.array(kwargs['max_u']) if isinstance(kwargs['max_u'], list) else kwargs['max_u']\n    kwargs['gamma'] = 1. - 1. / kwargs['T']\n    if 'lr' in kwargs:\n        kwargs['pi_lr'] = kwargs['lr']\n        kwargs['Q_lr'] = kwargs['lr']\n        del kwargs['lr']\n    for name in ['buffer_size', 'hidden', 'layers','network_class','polyak','batch_size', \n                 'Q_lr', 'pi_lr', 'norm_eps', 'norm_clip', 'max_u','action_l2', 'clip_obs', \n                 'scope', 'relative_goals', 'use_supervised']:\n        wgcsl_params[name] = kwargs[name]\n        kwargs['_' + name] = kwargs[name]\n        del kwargs[name]\n    \n    kwargs['wgcsl_params'] = wgcsl_params\n    return kwargs\n\n\ndef log_params(params, logger=logger):\n    for key in sorted(params.keys()):\n        logger.info('{}: {}'.format(key, params[key]))\n\n\ndef configure_her(params):\n    env = cached_make_env(params['make_env'])\n    env.reset()\n\n    def reward_fun(ag_2, g, info):  # vectorized\n        return env.compute_reward(achieved_goal=ag_2, desired_goal=g, info=info)\n\n    # Prepare configuration for HER.\n    her_params = {\n        'reward_fun': reward_fun,\n        'no_relabel': params['no_relabel']\n    }\n    for name in ['replay_strategy', 'replay_k']:\n        her_params[name] = params[name]\n        params['_' + name] = her_params[name]\n        del params[name]\n\n    sample_supervised, her_sampler = make_sample_transitions(**her_params)\n    random_sampler = make_random_sample(her_params['reward_fun'])\n    samplers = {\n        'random': random_sampler,\n        'her': her_sampler,\n        'supervised':sample_supervised\n    }\n    return samplers, reward_fun\n\ndef simple_goal_subtract(a, b):\n    assert a.shape == b.shape\n    return a - b\n\ndef configure_wgcsl(dims, params, reuse=False, use_mpi=True, clip_return=True, offline_train=False):\n    samplers, reward_fun = configure_her(params)\n    # Extract relevant parameters.\n    rollout_batch_size = params['rollout_batch_size']\n    wgcsl_params = params['wgcsl_params']\n\n    input_dims = dims.copy()\n    # WGCSL agent\n    env = cached_make_env(params['make_env'])\n    env.reset()\n    wgcsl_params.update({'input_dims': input_dims,  # agent takes input observations\n                        'T': params['T'],\n                        'clip_pos_returns': True,  # clip positive returns\n                        'clip_return': (1. / (1. 
- params['gamma'])) if clip_return else np.inf, # max abs of return \n 'rollout_batch_size': rollout_batch_size,\n 'subtract_goals': simple_goal_subtract,\n 'sample_transitions': samplers['her'],\n 'random_sampler':samplers['random'],\n 'supervised_sampler':samplers['supervised'],\n 'gamma': params['gamma'],\n 'su_method': params['su_method'],\n 'baw_delta': params['baw_delta'],\n 'baw_max': params['baw_max'],\n })\n wgcsl_params['info'] = {\n 'env_name': params['env_name'],\n 'reward_fun':reward_fun\n } \n policy = WGCSL(reuse=reuse, **wgcsl_params, use_mpi=use_mpi, offline_train=offline_train) \n return policy\n\n\ndef configure_dims(params):\n env = cached_make_env(params['make_env'])\n env.reset()\n obs, _, _, info = env.step(env.action_space.sample())\n dims = {\n 'o': obs['observation'].shape[0],\n 'u': env.action_space.shape[0],\n 'g': obs['desired_goal'].shape[0],\n }\n return dims\n", "repo_name": "YangRui2015/AWGCSL", "sub_path": "wgcsl/algo/config.py", "file_name": "config.py", "file_ext": "py", "file_size_in_byte": 10288, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 26, "dataset": "github-code", "pt": "47", "api": [{"api_name": "gym.make", "line_number": 158, "usage_type": "call"}, {"api_name": "wgcsl.common.logger.log", "line_number": 160, "usage_type": "call"}, {"api_name": "wgcsl.common.logger", "line_number": 160, "usage_type": "name"}, {"api_name": "gym.make", "line_number": 161, "usage_type": "call"}, {"api_name": "wgcsl.envs.multi_world_wrapper.FetchGoalWrapper", "line_number": 165, "usage_type": "call"}, {"api_name": "wgcsl.envs.multi_world_wrapper.PointGoalWrapper", "line_number": 169, "usage_type": "call"}, {"api_name": "wgcsl.envs.multi_world_wrapper.SawyerGoalWrapper", "line_number": 172, "usage_type": "call"}, {"api_name": "wgcsl.envs.multi_world_wrapper.ReacherGoalWrapper", "line_number": 174, "usage_type": "call"}, {"api_name": "wgcsl.common.logger.get_dir", "line_number": 176, "usage_type": "call"}, {"api_name": "wgcsl.common.logger", "line_number": 176, "usage_type": "name"}, {"api_name": "mpi4py.MPI.COMM_WORLD.Get_rank", "line_number": 179, "usage_type": "call"}, {"api_name": "mpi4py.MPI.COMM_WORLD", "line_number": 179, "usage_type": "attribute"}, {"api_name": "mpi4py.MPI", "line_number": 179, "usage_type": "name"}, {"api_name": "mpi4py.MPI", "line_number": 181, "usage_type": "name"}, {"api_name": "wgcsl.common.logger.warn", "line_number": 183, "usage_type": "call"}, {"api_name": "wgcsl.common.logger", "line_number": 183, "usage_type": "name"}, {"api_name": "wgcsl.common.monitor.Monitor", "line_number": 189, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "wgcsl.common.logger.get_dir", "line_number": 190, "usage_type": "call"}, {"api_name": "wgcsl.common.logger", "line_number": 190, "usage_type": "name"}, {"api_name": "gym.wrappers.TimeLimit", "line_number": 193, "usage_type": "call"}, {"api_name": "gym.wrappers", "line_number": 193, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 203, "usage_type": "call"}, {"api_name": "wgcsl.common.logger", "line_number": 220, "usage_type": "name"}, {"api_name": "wgcsl.common.logger.info", "line_number": 222, "usage_type": "call"}, {"api_name": "wgcsl.common.logger", "line_number": 222, "usage_type": "name"}, {"api_name": "wgcsl.algo.supervised_sampler.make_sample_transitions", "line_number": 242, "usage_type": "call"}, {"api_name": 
"wgcsl.algo.supervised_sampler.make_random_sample", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 268, "usage_type": "attribute"}, {"api_name": "wgcsl.algo.wgcsl.WGCSL", "line_number": 283, "usage_type": "call"}]} {"seq_id": "41581870451", "text": "import scipy.special \nimport numpy as np\nimport itertools\n\n#federated_shap methods\nclass federated_shap():\n def __init__(self):\n pass\n\n def _powerset(self, iterable):\n s = list(iterable)\n return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s)+1))\n\n def _shapley_kernel(self, M ,s):\n if s == 0 or s == M:\n return 10000\n return (M-1)/(scipy.special.binom(M,s)*s*(M-s))\n\n\n\n #Original shap function\n '''\n f: model\n x: one instance with features\n reference: To determine the impact\n of a feature, that feature is set to \"missing\" and the change in the model output\n is observed. Since most models aren't designed to handle arbitrary missing data at test\n time, we simulate \"missing\" by replacing the feature with the values it takes in the\n background dataset. So if the background dataset is a simple sample of all zeros, then\n we would approximate a feature being missing by setting it to zero. For small problems\n this background dataset can be the whole training set, but for larger problems consider\n using a single reference value or using the kmeans function to summarize the dataset.\n M: number of features\n '''\n def kernel_shap(self, f, x, reference, M):\n\n X = np.zeros((2**M,M+1))\n X[:,-1] = 1\n weights = np.zeros(2**M)\n V = np.zeros((2**M,M))\n for i in range(2**M):\n V[i,:] = reference\n\n ws = {}\n for i,s in enumerate(self._powerset(range(M))):\n s = list(s)\n #print(s)\n V[i,s] = x[s]\n X[i,s] = 1\n ws[len(s)] = ws.get(len(s), 0) + self._shapley_kernel(M,len(s))\n weights[i] = self._shapley_kernel(M,len(s))\n y = f(V)\n tmp = np.linalg.inv(np.dot(np.dot(X.T, np.diag(weights)), X))\n return np.dot(tmp, np.dot(np.dot(X.T, np.diag(weights)), y))\n\n #Federated Shap Function\n '''\n f: model\n x: one instance with features\n reference: To determine the impact\n of a feature, that feature is set to \"missing\" and the change in the model output\n is observed. Since most models aren't designed to handle arbitrary missing data at test\n time, we simulate \"missing\" by replacing the feature with the values it takes in the\n background dataset. So if the background dataset is a simple sample of all zeros, then\n we would approximate a feature being missing by setting it to zero. 
For small problems\n this background dataset can be the whole training set, but for larger problems consider\n using a single reference value or using the kmeans function to summarize the dataset.\n M: number of features\n fed_pos: feature position in x start from which the features are hidden and aggregated\n '''\n def kernel_shap_federated(self, f, x, reference, M, fed_pos):\n M_real = M\n M_cur = fed_pos + 1 #with one extra feature as the aggregated hidden features\n\n X = np.zeros((2**M_cur,M_cur+1))\n X[:,-1] = 1\n\n weights = np.zeros(2**M_cur)\n V = np.zeros((2**M_cur,M_real))\n for i in range(2**M_cur):\n V[i,:] = reference\n\n ws = {}\n\n hidden_index = range(fed_pos, M_real)\n\n for i,s in enumerate(self._powerset(range(M_cur))):\n #s is the different combinations of features\n s = list(s)\n #print(x)\n #print(s)\n V[i,s] = x[s]\n #if s contains the last combined feature, those hidden features will be set to real values instead of reference\n if fed_pos in s:\n #print(x)\n #print(hidden_index)\n V[i,hidden_index] = x[hidden_index]\n X[i,s] = 1\n ws[len(s)] = ws.get(len(s), 0) + self._shapley_kernel(M_cur,len(s))\n weights[i] = self._shapley_kernel(M_cur,len(s))\n y = f(V)\n tmp = np.linalg.inv(np.dot(np.dot(X.T, np.diag(weights)), X))\n return np.dot(tmp, np.dot(np.dot(X.T, np.diag(weights)), y))\n\n\n\n###########Dummy Testing#########################\n#Function that imitates the model, takes in instance features and outputs predictons\ndef f(X):\n np.random.seed(0)\n beta = np.random.rand(X.shape[-1])\n return np.dot(X,beta) + 10\n\n#Original Shap\nprint(\"Original Shap Dummy Testing:\")\nM = 10\nnp.random.seed(1)\nx = np.random.randn(M)\nreference = np.zeros(M)\nfs = federated_shap()\nphi = fs.kernel_shap(f, x, reference, M)\nbase_value = phi[-1]\nshap_values = phi[:-1]\n\nprint(\" reference =\", reference)\nprint(\" x =\", x)\nprint(\"shap_values =\", shap_values)\nprint(\" base_value =\", base_value)\nprint(\" sum(phi) =\", np.sum(phi))\nprint(\" f(x) =\", f(x))\n\n#Federated Shap\nprint(\"Federated Shap Dummy Testing:\")\nM = 10\nnp.random.seed(1)\nx = np.random.randn(M)\nreference = np.zeros(M)\nfed_pos = 6\nfs = federated_shap()\nphi = fs.kernel_shap_federated(f, x, reference, M, fed_pos)\nbase_value = phi[-1]\nshap_values = phi[:-1]\n\nprint(\" reference =\", reference)\nprint(\" x =\", x)\nprint(\"shap_values =\", shap_values)\nprint(\" base_value =\", base_value)\nprint(\" sum(phi) =\", np.sum(phi))\nprint(\" f(x) =\", f(x))", "repo_name": "crownpku/federated_shap", "sub_path": "federated_shap.py", "file_name": "federated_shap.py", "file_ext": "py", "file_size_in_byte": 5330, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 34, "dataset": "github-code", "pt": "47", "api": [{"api_name": "itertools.chain.from_iterable", "line_number": 12, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 12, "usage_type": "attribute"}, {"api_name": "itertools.combinations", "line_number": 12, "usage_type": "call"}, {"api_name": "scipy.special.special.binom", "line_number": 17, "usage_type": "call"}, {"api_name": "scipy.special.special", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scipy.special", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 53, "usage_type": "call"}, {"api_name": 
"numpy.linalg", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 102, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 110, "usage_type": "attribute"}, {"api_name": "numpy.random.rand", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 111, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 117, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 118, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 135, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 135, "usage_type": "attribute"}, {"api_name": "numpy.random.randn", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 148, "usage_type": "call"}]} {"seq_id": "6950950394", "text": "import json\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\nfrom json.decoder import JSONDecodeError\nfrom http import HTTPStatus\n\nfrom httpx import HTTPStatusError\nimport httpx\n\nfrom austrakka.utils.exceptions import FailedResponseException\nfrom austrakka.utils.exceptions import UnknownResponseException\nfrom austrakka.utils.output import log_response\nfrom austrakka.utils.context import CxtKey\nfrom austrakka.utils.context import get_ctx_value\nfrom austrakka import __version__\n\nCONTENT_TYPE_JSON = 'application/json'\nCONTENT_TYPE_MULTIPART = 'multipart/form-data; charset=utf-8; boundary=+++'\n\nMODE_SKIP = 'skip'\nMODE_OVERWRITE = 'overwrite'\nMODE = 'mode'\n\n\ndef _get_default_headers(\n content_type: str = CONTENT_TYPE_JSON,\n) -> Dict:\n default_headers = {\n 'Content-Type': content_type,\n 'Authorization': f'Bearer {get_ctx_value(CxtKey.CTX_TOKEN)}',\n 'User-Agent': f'austrakka/{__version__}',\n }\n return default_headers\n\n\ndef _check_response(response: httpx.Response):\n # pylint: disable=raise-missing-from\n try:\n parsed_resp = response.json()\n if 'data' not in parsed_resp or 'messages' not in parsed_resp:\n raise UnknownResponseException(\n f'{response.status_code}: 
{parsed_resp}'\n            )\n        try:\n            response.raise_for_status()\n        except HTTPStatusError:\n            raise FailedResponseException(parsed_resp)\n    except JSONDecodeError:\n        raise UnknownResponseException(\n            f'{response.status_code}: {response.text}'\n        )\n\n\ndef _get_data(body: Union[Dict, List] = None) -> str:\n    return json.dumps(body) if body is not None else None\n\n\ndef _get_url(path: str):\n    return f'{get_ctx_value(CxtKey.CTX_URI)}/api/{path}'\n\n\ndef get_response(response: httpx.Response, log_resp: bool = False) -> Dict:\n    _check_response(response)\n    parsed_resp = {} \\\n        if response.status_code == HTTPStatus.NO_CONTENT else response.json()\n    if log_resp:\n        log_response(parsed_resp)\n    return parsed_resp\n\n\ndef _get_client(\n    content_type: str = CONTENT_TYPE_JSON\n):\n    return httpx.Client(\n        headers=_get_default_headers(content_type),\n        verify=get_ctx_value(CxtKey.CTX_VERIFY_CERT),\n        timeout=300,\n        http2=get_ctx_value(CxtKey.CTX_USE_HTTP2),\n    )\n\n\ndef _use_http_client(\n    content_type: str = CONTENT_TYPE_JSON,\n    log_resp: bool = False,\n    parse_resp: bool = True\n):\n    def decorator(func):\n        def inner_func(*args, **kwargs):\n            with _get_client(content_type) as client:\n                response = func(*args, **kwargs, client=client)\n                if parse_resp:\n                    return get_response(response, log_resp)\n                return response\n        return inner_func\n    return decorator\n\n\n@_use_http_client()\ndef api_get(\n    path: str,\n    params: Dict = None,\n    client: httpx.Client = None,\n):\n    return client.get(\n        _get_url(path),\n        params=params,\n    )\n\n\ndef api_get_stream(\n    path: str,\n    func: Callable[[httpx.Response], None],\n):\n    \"\"\"\n    Throws httpx.HTTPStatusError when the status is not 2xx.\n    \"\"\"\n    resp: httpx.Response\n    with _get_client().stream(\"GET\", _get_url(path)) as resp:\n        resp.raise_for_status()\n        func(resp)\n\n\n@_use_http_client(content_type=CONTENT_TYPE_MULTIPART, log_resp=True)\ndef api_post_multipart(\n    path: str,\n    files,\n    params: Dict = None,\n    data: Union[Dict, List] = None,\n    custom_headers: Dict = None,\n    client: httpx.Client = None,\n):\n    custom_headers = {} if custom_headers is None else custom_headers\n    return client.post(\n        _get_url(path),\n        data=data,\n        params=params,\n        files=files,\n        headers=dict(client.headers) | custom_headers\n    )\n\n\n@_use_http_client(content_type=CONTENT_TYPE_MULTIPART,\n                  log_resp=True, parse_resp=False)\ndef api_post_multipart_raw(\n    path: str,\n    files,\n    params: Dict = None,\n    data: Union[Dict, List] = None,\n    custom_headers: Dict = None,\n    client: httpx.Client = None,\n):\n    custom_headers = {} if custom_headers is None else custom_headers\n    return client.post(\n        _get_url(path),\n        data=data,\n        params=params,\n        files=files,\n        headers=dict(client.headers) | custom_headers\n    )\n\n\n@_use_http_client(log_resp=True)\ndef api_post(\n    path: str,\n    params: Dict = None,\n    data: Union[Dict, List] = None,\n    client: httpx.Client = None,\n):\n    return client.post(\n        _get_url(path),\n        data=json.dumps(data),\n        params=params,\n    )\n\n\n@_use_http_client(log_resp=True)\ndef api_put(\n    path: str,\n    params: Dict = None,\n    data: Union[Dict, List] = None,\n    client: httpx.Client = None,\n):\n    return client.put(\n        _get_url(path),\n        data=json.dumps(data),\n        params=params,\n    )\n\n\n@_use_http_client(log_resp=True)\ndef api_patch(\n    path: str,\n    params: Dict = None,\n    data: Union[Dict, List] = None,\n    client: httpx.Client = None,\n):\n    return client.patch(\n        _get_url(path),\n        data=json.dumps(data),\n        params=params,\n    )\n\n\n@_use_http_client(log_resp=True)\ndef api_delete(\n    path: str,\n    params: Dict = None,\n    
custom_headers: Dict = None,\n    client: httpx.Client = None,\n):\n    return client.delete(\n        _get_url(path),\n        params=params,\n        headers=dict(client.headers) | (custom_headers or {}) # guard: custom_headers defaults to None\n    )\n\n\ndef set_mode_header(custom_headers, force, skip):\n    if skip:\n        custom_headers[MODE] = MODE_SKIP\n    if force:\n        custom_headers[MODE] = MODE_OVERWRITE\n", "repo_name": "AusTrakka/austrakka2-cli", "sub_path": "austrakka/utils/api.py", "file_name": "api.py", "file_ext": "py", "file_size_in_byte": 5765, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "47", "api": [{"api_name": "austrakka.utils.context.get_ctx_value", "line_number": 32, "usage_type": "call"}, {"api_name": "austrakka.utils.context.CxtKey.CTX_TOKEN", "line_number": 32, "usage_type": "attribute"}, {"api_name": "austrakka.utils.context.CxtKey", "line_number": 32, "usage_type": "name"}, {"api_name": "austrakka.__version__", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 29, "usage_type": "name"}, {"api_name": "httpx.Response", "line_number": 38, "usage_type": "attribute"}, {"api_name": "austrakka.utils.exceptions.UnknownResponseException", "line_number": 43, "usage_type": "call"}, {"api_name": "httpx.HTTPStatusError", "line_number": 48, "usage_type": "name"}, {"api_name": "austrakka.utils.exceptions.FailedResponseException", "line_number": 49, "usage_type": "call"}, {"api_name": "json.decoder.JSONDecodeError", "line_number": 50, "usage_type": "name"}, {"api_name": "austrakka.utils.exceptions.UnknownResponseException", "line_number": 51, "usage_type": "call"}, {"api_name": "typing.Union", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 56, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 56, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 57, "usage_type": "call"}, {"api_name": "austrakka.utils.context.get_ctx_value", "line_number": 61, "usage_type": "call"}, {"api_name": "austrakka.utils.context.CxtKey.CTX_URI", "line_number": 61, "usage_type": "attribute"}, {"api_name": "austrakka.utils.context.CxtKey", "line_number": 61, "usage_type": "name"}, {"api_name": "httpx.Response", "line_number": 64, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus.NO_CONTENT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "http.HTTPStatus", "line_number": 67, "usage_type": "name"}, {"api_name": "austrakka.utils.output.log_response", "line_number": 69, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 64, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 76, "usage_type": "call"}, {"api_name": "austrakka.utils.context.get_ctx_value", "line_number": 78, "usage_type": "call"}, {"api_name": "austrakka.utils.context.CxtKey.CTX_VERIFY_CERT", "line_number": 78, "usage_type": "attribute"}, {"api_name": "austrakka.utils.context.CxtKey", "line_number": 78, "usage_type": "name"}, {"api_name": "austrakka.utils.context.get_ctx_value", "line_number": 80, "usage_type": "call"}, {"api_name": "austrakka.utils.context.CxtKey.CTX_USE_HTTP2", "line_number": 80, "usage_type": "attribute"}, {"api_name": "austrakka.utils.context.CxtKey", "line_number": 80, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 103, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 104, "usage_type": "attribute"}, {"api_name": "typing.Callable", "line_number": 114, "usage_type": "name"}, {"api_name": "httpx.Response", "line_number": 114, "usage_type": "attribute"}, 
{"api_name": "httpx.Response", "line_number": 119, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 130, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 131, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 132, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 149, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 150, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 151, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 152, "usage_type": "attribute"}, {"api_name": "typing.Dict", "line_number": 167, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 168, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 168, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 168, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 169, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 173, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 181, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 182, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 182, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 183, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 187, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 195, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 196, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 197, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 201, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 209, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 210, "usage_type": "name"}, {"api_name": "httpx.Client", "line_number": 211, "usage_type": "attribute"}]} {"seq_id": "74767467982", "text": "import asyncio\r\nimport thread\r\nimport threading\r\nimport time\r\n\r\n#\t==========================Function==========================\t\r\n\r\ndef consumer():\r\n\tr = ''\r\n\twhile True:\r\n\t\tn = yield r\r\n\t\tif not n:\r\n\t\t\treturn\r\n\t\tprint('[CONSUMER] Consuming {0}...'.format(n))\r\n\t\tr = '200 OK'\r\n\r\ndef produce(c):\r\n\tc.send(None)\r\n\tn = 0\r\n\twhile n < 5:\r\n\t\tn = n + 1\r\n\t\tprint('[PRODUCER] Producing {0}...'.format(n))\r\n\t\tr = c.send(n)\r\n\t\tprint('[PRODUCER] Consumer return: {0}'.format(r))\r\n\tc.close()\r\n\r\n@asyncio.coroutine\r\ndef hello_1():\r\n\tcount = 0\r\n\t\r\n\t#while count < 10:\r\n\t#\tprint(\"Hello {0} times\".format(count))\r\n\t#\tcount += 1\r\n\t#\tr = yield from asyncio.sleep(1)\r\n\t\t\r\n\tprint(\"Hello world!\")\r\n\t# 异步调用asyncio.sleep(1):\r\n\tr = yield from asyncio.sleep(1)\r\n\tprint(\"Hello again! r:{0}\".format(r))\r\n\t\r\n@asyncio.coroutine\r\ndef hello_2():\r\n\tprint('Hello world! 
{0}'.format(threading.currentThread()))\r\n\tyield from asyncio.sleep(1)\r\n\tprint('Hello again! {0}'.format(threading.currentThread()))\r\n\t\r\n@asyncio.coroutine\r\ndef web_get(host):\r\n\tprint(\"Web host: {0}\".format(host))\r\n\tconnect = asyncio.open_connection(host, 80)\r\n\treader, writer = yield from connect\r\n\theader = 'GET / HTTP/1.0\\r\\nHost: %s\\r\\n\\r\\n' % host\r\n\twriter.write(header.encode('utf-8'))\r\n\tyield from writer.drain()\r\n\twhile True:\r\n\t\tline = yield from reader.readline()\r\n\t\tif line == b'\\r\\n':\r\n\t\t\tbreak\r\n\t\tprint('%s header > %s' % (host, line.decode('utf-8').rstrip()))\r\n\t# Ignore the body, close the socket\r\n\twriter.close()\r\n\t\r\n#\t==========================main==========================\r\ntry:\r\n\t\r\n\tmodel = int(input(\"Enter Test Function:\"))\r\n\t\r\n\tif model == 1:\r\n\t\t#\tAsynchronous (generator-based)\r\n\t\tc = consumer()\r\n\t\tproduce(c)\r\n\t\r\n\telif model == 2:\r\n\t\t# Get the EventLoop:\r\n\t\tloop = asyncio.get_event_loop()\r\n\t\t# Run the coroutine\r\n\t\tloop.run_until_complete(hello_1())\r\n\t\tloop.close()\r\n\t\t\r\n\telif model == 3:\r\n\t\t# Get the EventLoop:\r\n\t\tloop = asyncio.get_event_loop()\r\n\t\t# Run the coroutine\r\n\t\ttasks = [hello_2(), hello_2()]\r\n\t\tloop.run_until_complete(asyncio.wait(tasks))\r\n\t\tloop.close()\r\n\t\r\n\telif model == 4:\r\n\t\t# Get the EventLoop:\r\n\t\tloop = asyncio.get_event_loop()\r\n\t\t# Run the coroutine\r\n\t\ttasks = [web_get(host) for host in ['www.sina.com.cn', 'www.sohu.com', 'www.163.com']]\r\n\t\tloop.run_until_complete(asyncio.wait(tasks))\r\n\t\tloop.close()\r\n\t\r\nexcept Exception as ex:\r\n\tprint(\"Error: {0}\".format(ex))\r\n\r\n#input(\"Press Enter to continue...\\n\")", "repo_name": "rexmin0629/Python", "sub_path": "Async_IO.py", "file_name": "Async_IO.py", "file_ext": "py", "file_size_in_byte": 2364, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "asyncio.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 27, "usage_type": "attribute"}, {"api_name": "threading.currentThread", "line_number": 43, "usage_type": "call"}, {"api_name": "asyncio.sleep", "line_number": 44, "usage_type": "call"}, {"api_name": "threading.currentThread", "line_number": 45, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 41, "usage_type": "attribute"}, {"api_name": "asyncio.open_connection", "line_number": 50, "usage_type": "call"}, {"api_name": "asyncio.coroutine", "line_number": 47, "usage_type": "attribute"}, {"api_name": "asyncio.get_event_loop", "line_number": 75, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 82, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 85, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 90, "usage_type": "call"}, {"api_name": "asyncio.wait", "line_number": 93, "usage_type": "call"}]} {"seq_id": "34258665622", "text": "\"\"\"Object Oriented Programming.\"\"\"\n# Sometimes, our programs will get large, and require several pieces of data.\n# With Classes, we can group these pieces of data together, and use them accordingly.\nfrom typing import Self\n\n__all__ = [\"main_classing\"]\n__version__ = \"0.2\"\n__author__ = \"Overzealous Lotus\"\n__license__ = \"GPL-3.0\"\n\n\ndef main_classing() -> None:\n    \"\"\"First function to run our program.\"\"\"\n\n    class SoftwareEngineer():\n        \"\"\"Instance attributes.\"\"\"\n\n        # class attributes\n        alias = \"Keyboard Mage\" # 
This is a class-wide attribute\n\n        def __init__(self: Self, name: str, age: int, level: str, salary: int) -> None: # Define our attributes\n            self.name = name\n            self.age = age\n            self.level = level\n            self.salary = salary\n\n        # instance method\n\n        def coding(self: Self) -> None: # These are basic methods we define ourselves in classes\n            \"\"\"Lets our user know who is coding right now.\"\"\"\n            print(f\"{self.name} is writing code...\")\n\n        def c_language(self: Self, language: str) -> None: # Without self, does not work\n            \"\"\"Lets our user know which language the worker is writing in.\"\"\"\n            print(f\"{self.name} is writing code in {language}...\")\n\n        # Dunder method\n        def __str__(self: Self) -> str: # When called, info is shown\n            \"\"\"Information about our objects.\"\"\"\n            info = f\"Name = {self.name}, age = {self.age}, level = {self.level}\"\n            return info\n\n        # Equality method\n        def __eq__(self: Self, other: Self) -> bool: # By default, compares memory\n            \"\"\"Comparison between two objects.\"\"\"\n            return self.name == other.name and self.age == other.age\n\n        @staticmethod # Allows calling without an instance, since there is no self parameter\n        def entry_salary(age: int) -> int:\n            \"\"\"Define what a worker's salary is based on age.\"\"\"\n            junior_age, senior_age = 25, 30\n            if age < junior_age:\n                return 5000\n            if age < senior_age:\n                return 7000\n            return 9000\n\n    # instance\n\n    worker_one = SoftwareEngineer(\"Jason\", 20, \"Junior\", 5000)\n    worker_two = SoftwareEngineer(\"Garret\", 25, \"Senior\", 7000)\n    worker_tres = SoftwareEngineer(\"Garret\", 25, \"Senior\", 7000)\n\n    worker_one.coding()\n    worker_two.coding()\n\n    worker_one.c_language(\"Python\")\n    worker_two.c_language(\"Bash\")\n\n    print(f\"{worker_one} and {worker_two}\")\n\n    print(worker_tres == worker_two)\n\n\nif __name__ == \"__main__\":\n    main_classing()\n", "repo_name": "OverzealousLotus/Blossom-Bloom", "sub_path": "Python/Intermediate/Classes/classes.py", "file_name": "classes.py", "file_ext": "py", "file_size_in_byte": 2584, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "47", "api": [{"api_name": "typing.Self", "line_number": 21, "usage_type": "name"}, {"api_name": "typing.Self", "line_number": 29, "usage_type": "name"}, {"api_name": "typing.Self", "line_number": 33, "usage_type": "name"}, {"api_name": "typing.Self", "line_number": 38, "usage_type": "name"}, {"api_name": "typing.Self", "line_number": 44, "usage_type": "name"}]} {"seq_id": "27098932118", "text": "import tensorflow as tf\nfrom keras.layers import (Conv3D, BatchNormalization, AveragePooling3D, concatenate,\n                          Activation, Input, GlobalAvgPool3D, Dense)\nfrom keras.regularizers import l2 as l2_penalty\nfrom keras.models import Model\nfrom keras.utils.vis_utils import plot_model\nfrom utils.losses import fmeasure, precision, recall\n\n\ndef conv_block(x, activation, filters, bottleneck, kernel_initializer, weights_decay, bn_scale):\n    x = BatchNormalization(scale=bn_scale, axis=-1)(x)\n    x = activation()(x)\n    x = Conv3D(filters, kernel_size=(1, 1, 1), padding='same', use_bias=False,\n               kernel_initializer=kernel_initializer, kernel_regularizer=l2_penalty(weights_decay))(x)\n    x = BatchNormalization(scale=bn_scale, axis=-1)(x)\n    x = activation()(x)\n    x = Conv3D(filters // bottleneck, kernel_size=(3, 3, 3), padding='same', use_bias=True,\n               kernel_initializer=kernel_initializer, kernel_regularizer=l2_penalty(weights_decay))(x)\n    return x\n\n\ndef dense_block(x, k, n, bottleneck,\n                activation, kernel_initializer, weights_decay, 
bn_scale):\n for _ in range(n):\n conv = conv_block(x, activation, k, bottleneck,\n kernel_initializer, weights_decay, bn_scale)\n x = concatenate([conv, x], axis=-1)\n return x\n\n\ndef transmit_block(x, compression, activation, bn_scale, kernel_initializer, weights_decay):\n x = BatchNormalization(scale=bn_scale, axis=-1)(x)\n x = activation()(x)\n if (compression is not None) and (compression > 1):\n *_, f = x.get_shape().as_list()\n x = Conv3D(f // compression, kernel_size=(1, 1, 1), padding='same', use_bias=True,\n kernel_initializer=kernel_initializer, kernel_regularizer=l2_penalty(weights_decay))(x)\n x = AveragePooling3D((2, 2, 2), padding='valid')(x)\n else:\n x = GlobalAvgPool3D()(x)\n return x\n\n\ndef get_model(dhw=[48, 48, 48], k=64, n=3, bottleneck=4, compression=2, first_layer=32,\n activation=lambda: Activation('relu'), bn_scale=True,\n weights_decay=0., kernel_initializer='he_uniform', weights=None):\n shape = dhw + [1]\n\n inputs = Input(shape=shape)\n conv = Conv3D(first_layer, kernel_size=(3, 3, 3), padding='same', use_bias=True,\n kernel_initializer=kernel_initializer, kernel_regularizer=l2_penalty(weights_decay))(inputs)\n\n transmit_down_count = 4\n for l in range(transmit_down_count):\n db = dense_block(conv, k, n, bottleneck,\n activation, kernel_initializer, weights_decay, bn_scale)\n if l == transmit_down_count - 1:\n conv = transmit_block(db, None, activation,\n bn_scale, kernel_initializer, weights_decay)\n else:\n conv = transmit_block(db, compression, activation,\n bn_scale, kernel_initializer, weights_decay)\n\n outputs = Dense(1, kernel_regularizer=l2_penalty(weights_decay),\n kernel_initializer=kernel_initializer, activation='sigmoid')(conv)\n\n model = Model(inputs, outputs)\n model.summary()\n\n if weights is not None:\n model.load_weights(weights, by_name=True)\n return model\n\n\ndef get_compiled(dhw=[48, 48, 48], k=64, n=3, bottleneck=4, compression=2, first_layer=32,\n loss='binary_crossentropy', optimizer='adam', weights_decay=0.,\n kernel_initializer='he_uniform', weights=None,\n activation=lambda: Activation('relu'), bn_scale=True):\n model = get_model(dhw, k, n, bottleneck, compression, first_layer,\n activation, bn_scale, weights_decay, kernel_initializer, weights)\n model.compile(loss=loss, optimizer=optimizer,\n metrics=[loss, 'accuracy', fmeasure, precision, recall])\n return model\n\n\nif __name__ == '__main__':\n # model = get_model()\n # model.summary()\n model = get_compiled()\n plot_model(model,to_file='desnetbc_v1.png',show_shapes=True,show_layer_names=True)\n", "repo_name": "Yuxiang1990/keras", "sub_path": "lib/models/densebc_v1.py", "file_name": "densebc_v1.py", "file_ext": "py", "file_size_in_byte": 4039, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "keras.layers.BatchNormalization", "line_number": 11, "usage_type": "call"}, {"api_name": "keras.layers.Conv3D", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 14, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 15, "usage_type": "call"}, {"api_name": "keras.layers.Conv3D", "line_number": 17, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.layers.concatenate", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.layers.BatchNormalization", "line_number": 32, "usage_type": "call"}, {"api_name": "keras.layers.Conv3D", "line_number": 36, 
"usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 37, "usage_type": "call"}, {"api_name": "keras.layers.AveragePooling3D", "line_number": 38, "usage_type": "call"}, {"api_name": "keras.layers.GlobalAvgPool3D", "line_number": 40, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 45, "usage_type": "call"}, {"api_name": "keras.layers.Input", "line_number": 49, "usage_type": "call"}, {"api_name": "keras.layers.Conv3D", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 51, "usage_type": "call"}, {"api_name": "keras.layers.Dense", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.regularizers.l2", "line_number": 64, "usage_type": "call"}, {"api_name": "keras.models.Model", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.Activation", "line_number": 78, "usage_type": "call"}, {"api_name": "utils.losses.fmeasure", "line_number": 82, "usage_type": "name"}, {"api_name": "utils.losses.precision", "line_number": 82, "usage_type": "name"}, {"api_name": "utils.losses.recall", "line_number": 82, "usage_type": "name"}, {"api_name": "keras.utils.vis_utils.plot_model", "line_number": 90, "usage_type": "call"}]} {"seq_id": "20042386923", "text": "from lxml.etree import parse\nimport time\n\ndoc = parse(\"/Users/ianbicking/Downloads/ianbickingablog.wordpress.2013-03-29 (1).xml\")\nchannel = doc.getroot()[0]\nitems = channel.findall(\"item\")\n\nfor item in items:\n title = item.find(\"title\").text.strip()\n link = item.find(\"link\").text.strip()\n date = item.find(\"pubDate\").text.strip().split(\"+\")[0].strip()\n date = time.strptime(date, \"%a, %d %b %Y %H:%M:%S\")\n date = time.strftime(\"%B %Y\", date)\n print ('
<li><a href=\"%(link)s\">%(title)s</a> (%(date)s)</li>'\n           % dict(\n        link=link, title=title, date=date))\n", "repo_name": "ianb/blog", "sub_path": "bin/archivist.py", "file_name": "archivist.py", "file_ext": "py", "file_size_in_byte": 591, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "47", "api": [{"api_name": "lxml.etree.parse", "line_number": 4, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 12, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 13, "usage_type": "call"}]} {"seq_id": "28805069995", "text": "import torch\nimport random\nimport copy\nimport numpy as np\nfrom PIL import Image\nfrom typing import Any, Callable, Optional, Tuple, List\nfrom torchvision.datasets import CIFAR10, CIFAR100\nimport torch.utils.data as data\n\n\nclass UnlearnCIFAR100(CIFAR100):\n    def __init__(self, root: str,\n                 data_set: str = 'train',\n                 transform: Optional[Callable] = None,\n                 target_transform: Optional[Callable] = None,\n                 download: bool = False,\n                 forget_range: Any = None,\n                 ignore_range: Any = None,\n                 data_section: str = 'full',\n                 random_seed: int = 0,\n                 ):\n        \"\"\"\n        Additional Args:\n            data_set (str) ['train', 'test', 'val']: which data split to use\n            data_section (str) ['full', 'retain', 'forget']: which section of data to use\n        \"\"\"\n        super().__init__(root, data_set=='train', transform, target_transform, download)\n        if data_section not in ['full', 'retain', 'forget']:\n            raise RuntimeError('Invalid data section. The valid data sections are [\"full\", \"retain\", \"forget\"]')\n        if data_set not in ['train', 'test', 'val']:\n            raise RuntimeError('Invalid data set. The valid data sets are [\"train\", \"test\", \"val\"]')\n\n        self.data_set = data_set\n        self.data_section = data_section\n        self.ignore_range = ignore_range\n        self.forget_range = forget_range\n\n        num_classes = len(set(self.targets))\n        random.seed(random_seed)\n        self.targets = np.array(self.targets)\n\n        if self.data_set != 'train':\n            set_idx = []\n            for i in range(num_classes):\n                idx = np.where(self.targets == i)[0].tolist()\n                random.shuffle(idx)\n                set_idx.extend(idx[:len(idx)//2] if self.data_set == 'test' else idx[len(idx)//2:])\n\n            self.data = self.data[set_idx, :, :, :]\n            self.targets = self.targets[set_idx]\n        \n\n        self.retain_idx, self.forget_idx, self.ignore_idx = [], [], []\n        if self.data_section != 'full':\n            for i in range(num_classes):\n                idx = np.where(self.targets == i)[0].tolist()\n                if self.ignore_range is not None and len(self.ignore_range) > i:\n                    self.ignore_idx.extend(idx[self.ignore_range[i][0]:self.ignore_range[i][1]])\n                if self.forget_range is not None and len(self.forget_range) > i:\n                    self.forget_idx.extend(idx[self.forget_range[i][0]:self.forget_range[i][1]])\n\n        self.retain_idx = list(set([i for i in range(len(self.targets))]) - set(self.forget_idx) - set(self.ignore_idx))\n\n        self.full_data = self.data\n        self.full_targets = self.targets\n\n        self.forget_data = self.data[self.forget_idx, :, :, :]\n        self.forget_targets = np.array(self.targets)[self.forget_idx].tolist()\n\n        self.retain_data = self.data[self.retain_idx, :, :, :]\n        self.retain_targets = np.array(self.targets)[self.retain_idx].tolist()\n\n        if self.data_section == 'full':\n            self.selected_data, self.selected_targets = self.full_data, self.full_targets\n        elif self.data_section == 'retain':\n            self.selected_data, self.selected_targets = self.retain_data, self.retain_targets\n        elif self.data_section == 'forget':\n            self.selected_data, self.selected_targets = self.forget_data, self.forget_targets\n\n        print(f'Number of forget samples: 
{len(self.forget_idx)}')\n        print(f'Number of selected samples: {self.__len__()}')\n\n    def __getitem__(self, index: int) -> Tuple[Any, Any]:\n        \"\"\"\n        Args:\n            index (int): Index\n        Returns:\n            tuple: (image, target) where target is the index of the target class.\n        \"\"\"\n        img, target = self.selected_data[index], self.selected_targets[index]\n\n        # doing this so that it is consistent with all other datasets\n        # to return a PIL Image\n        img = Image.fromarray(img)\n\n        if self.transform is not None:\n            img = self.transform(img)\n\n        if self.target_transform is not None:\n            target = self.target_transform(target)\n\n        return img, target\n\n    def __len__(self) -> int:\n        return len(self.selected_data)\n\n\nclass UnlearnCIFAR10(UnlearnCIFAR100):\n    base_folder = \"cifar-10-batches-py\"\n    url = \"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n    filename = \"cifar-10-python.tar.gz\"\n    tgz_md5 = \"c58f30108f718f92721af3b95e74349a\"\n    train_list = [\n        [\"data_batch_1\", \"c99cafc152244af753f735de768cd75f\"],\n        [\"data_batch_2\", \"d4bba439e000b95fd0a9bffe97cbabec\"],\n        [\"data_batch_3\", \"54ebc095f3ab1f0389bbae665268c751\"],\n        [\"data_batch_4\", \"634d18415352ddfa80567beed471001a\"],\n        [\"data_batch_5\", \"482c414d41f54cd18b22e5b47cb7c3cb\"],\n    ]\n\n    test_list = [\n        [\"test_batch\", \"40351d587109b95175f43aff81a1287e\"],\n    ]\n    meta = {\n        \"filename\": \"batches.meta\",\n        \"key\": \"label_names\",\n        \"md5\": \"5ff9c542aee3614f3951f8cda6e48888\",\n    }", "repo_name": "hnanhtuan/projected_gradient_unlearning", "sub_path": "datasets/unlearn_cifar.py", "file_name": "unlearn_cifar.py", "file_ext": "py", "file_size_in_byte": 5102, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "47", "api": [{"api_name": "torchvision.datasets.CIFAR100", "line_number": 11, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 14, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 15, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 18, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 45, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 94, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 94, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 83, "usage_type": "name"}]} {"seq_id": "37050498342", "text": "from sklearn.decomposition import non_negative_factorization\nimport torch\nimport models\nimport os\n\nclass Hierarchy:\n    \"\"\"\n    Defining the structure of the hierarchy used for the Extended MNIST Dataset\n    \"\"\"\n    def __init__(self, pretrained_path = './trained_weights'):\n        \n        self.nodes = ['root', 'SG1', 'SG2', 'SG3', 'SG4', 'SG5', 'SG6', 'SG7', 'SG8', 'SG9', 'SG10', 'SG11', 'SG12', 'SG13']\n        self.valid_paths = [\n            (0,0,0),\n            (1,0,0),\n            
(0,3,0),\n (2,),\n (3,0), \n (4,0),\n (3,11),\n (5,),\n (0,6),\n (0,5,2),\n (3,4),\n (0,7),\n (6,0),\n (0,1),\n (7,),\n (8,0,0),\n (3,12),\n (3,1),\n (1,0,1),\n (3,7),\n (1,1),\n (1,0,2),\n (9,0),\n (10,0),\n (0,0,1),\n (11,),\n (0,4),\n (3,8),\n (4,1),\n (8,2),\n (3,2),\n (3,5),\n (10,1),\n (3,6),\n (3,0),\n (0,3,1),\n (0,2),\n (3,10),\n (12,),\n (6,1),\n (8,0,1),\n (0,5,1),\n (3,9),\n (9,1),\n (0,5,0),\n (13,), \n (8,1),\n ]\n self.leaves = (0,1,2,3,4,5,6,7,8,9,'A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'd', 'e', 'f', 'g', 'h', 'i', 'n', 'q', 'r', 't')\n self.pretrained_path = pretrained_path\n\n self.next = {\n 'root': ['SG1', 'SG2', None, 'SG3', 'SG4', None, 'SG5', None, 'SG6', 'SG7', 'SG8', None, None, None ],\n 'SG1': ['SG9', None, None, 'SG10', None, 'SG11', None, None],\n 'SG2': ['SG12', None],\n 'SG6': ['SG13', None, None],\n }\n\n self.path_to_leaf = dict(\n zip(\n self.valid_paths,\n self.leaves\n )\n )\n\n self.path_to_target = dict(\n zip(\n self.valid_paths, \n list(range(len(self.valid_paths)))\n )\n )\n\n def getNext(self, current_node, child):\n if current_node not in self.next:\n return None\n\n next_child = self.next[current_node][child]\n return next_child\n \n def getLeaf(self, path):\n path = tuple(path)\n if path not in self.path_to_leaf:\n return None\n return self.path_to_leaf[path]\n\n def getTarget(self, path):\n path = tuple(path)\n if path not in self.path_to_target:\n return None\n return self.path_to_target[path]\n\n def getModel(self, DNN_name):\n model = getattr(models, \"get_\"+DNN_name+\"_model\")()\n path = os.path.join(self.pretrained_path,DNN_name+'.pth')\n model.load_state_dict(torch.load(path))\n return model", "repo_name": "abhinavgoel95/TRUNK", "sub_path": "EMNIST/hierarchy.py", "file_name": "hierarchy.py", "file_ext": "py", "file_size_in_byte": 3005, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "47", "api": [{"api_name": "os.path.join", "line_number": 107, "usage_type": "call"}, {"api_name": "os.path", "line_number": 107, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 108, "usage_type": "call"}]} {"seq_id": "19739985199", "text": "#!/usr/bin/env python\n#===============================================================================\n# Author: Will Fenton\n# Date: October 17 2019\n#===============================================================================\n\nfrom pysndfx import AudioEffectsChain\nimport moviepy.editor as mp\n\nimport os\nimport sys\nimport math\nimport getopt\n\n#===============================================================================\n\ndef print_usage():\n sys.stderr.write(\n\"\"\"Usage: python3 slowed-reverb.py [options]\nOptions:\n (-a | --audio)