Search (675 results, page 1 of 34)

  • Filter: type_ss:"x"
  1. Verwer, K.: Freiheit und Verantwortung bei Hans Jonas (2011) 0.19
    0.18546988 = product of:
      0.6800562 = sum of:
        0.06532684 = product of:
          0.1959805 = sum of:
            0.1959805 = weight(_text_:3a in 973) [ClassicSimilarity], result of:
              0.1959805 = score(doc=973,freq=2.0), product of:
                0.17435429 = queryWeight, product of:
                  8.478011 = idf(docFreq=24, maxDocs=44218)
                  0.02056547 = queryNorm
                1.1240361 = fieldWeight in 973, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  8.478011 = idf(docFreq=24, maxDocs=44218)
                  0.09375 = fieldNorm(doc=973)
          0.33333334 = coord(1/3)
        0.1959805 = weight(_text_:2f in 973) [ClassicSimilarity], result of:
          0.1959805 = score(doc=973,freq=2.0), product of:
            0.17435429 = queryWeight, product of:
              8.478011 = idf(docFreq=24, maxDocs=44218)
              0.02056547 = queryNorm
            1.1240361 = fieldWeight in 973, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              8.478011 = idf(docFreq=24, maxDocs=44218)
              0.09375 = fieldNorm(doc=973)
        0.013393938 = weight(_text_:und in 973) [ClassicSimilarity], result of:
          0.013393938 = score(doc=973,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.29385152 = fieldWeight in 973, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.09375 = fieldNorm(doc=973)
        0.013393938 = weight(_text_:und in 973) [ClassicSimilarity], result of:
          0.013393938 = score(doc=973,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.29385152 = fieldWeight in 973, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.09375 = fieldNorm(doc=973)
        0.1959805 = weight(_text_:2f in 973) [ClassicSimilarity], result of:
          0.1959805 = score(doc=973,freq=2.0), product of:
            0.17435429 = queryWeight, product of:
              8.478011 = idf(docFreq=24, maxDocs=44218)
              0.02056547 = queryNorm
            1.1240361 = fieldWeight in 973, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              8.478011 = idf(docFreq=24, maxDocs=44218)
              0.09375 = fieldNorm(doc=973)
        0.1959805 = weight(_text_:2f in 973) [ClassicSimilarity], result of:
          0.1959805 = score(doc=973,freq=2.0), product of:
            0.17435429 = queryWeight, product of:
              8.478011 = idf(docFreq=24, maxDocs=44218)
              0.02056547 = queryNorm
            1.1240361 = fieldWeight in 973, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              8.478011 = idf(docFreq=24, maxDocs=44218)
              0.09375 = fieldNorm(doc=973)
      0.27272728 = coord(6/22)
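
    The breakdown above is Lucene "explain" output for the ClassicSimilarity (TF-IDF) model: each clause score is queryWeight × fieldWeight, with queryWeight = idf × queryNorm and fieldWeight = tf × idf × fieldNorm, and the entry score multiplies the sum of the matching clause scores by the coordination factor (here 6 of 22 query clauses match). A minimal sketch that reproduces the _text_:3a clause for doc 973 from the constants above (the formulas are ClassicSimilarity's; the helper names are ours):

      import math

      def idf(doc_freq: int, max_docs: int) -> float:
          # ClassicSimilarity idf: ln(maxDocs / (docFreq + 1)) + 1
          return math.log(max_docs / (doc_freq + 1)) + 1

      query_norm = 0.02056547                  # queryNorm from the explain output
      term_idf = idf(24, 44218)                # ~8.478011 for "3a" (docFreq=24, maxDocs=44218)
      query_weight = term_idf * query_norm     # ~0.17435429
      tf = math.sqrt(2.0)                      # ~1.4142135 (termFreq=2.0)
      field_weight = tf * term_idf * 0.09375   # ~1.1240361 with fieldNorm(doc=973) = 0.09375
      clause_score = query_weight * field_weight   # ~0.1959805 = weight(_text_:3a in 973)
      entry_score = 0.6800562 * 6 / 22         # ~0.18546988 = sum of clause scores x coord(6/22)
      print(clause_score, entry_score)
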
    
    Content
    Cf.: http://creativechoice.org/doc/HansJonas.pdf.
  2. Gabler, S.: Vergabe von DDC-Sachgruppen mittels eines Schlagwort-Thesaurus (2021) 0.11
    0.10852432 = 0.2984419 × 0.36363637 coord(8/22); clause contributions in doc 1000: _text_:3a 0.0272, _text_:2f 0.0817 (×3), _text_:und 0.0097 (×2), _text_:in 0.0030, _text_:science 0.0039
    
    Abstract
    The thesis presents the construction of a thematically ordered thesaurus based on the subject headings of the Integrated Authority File (GND), using the DDC notations contained in it. The DDC subject groups of the German National Library form the top level of order of this thesaurus. The thesaurus is constructed rule-based, applying Linked Data principles in a SPARQL processor. It serves the automated extraction of metadata from scientific publications by means of a computational-linguistic extractor that processes digital full texts: the extractor identifies subject headings by comparing character strings against the labels in the thesaurus, orders the hits by their relevance in the text, and returns the assigned subject groups in rank order. The basic assumption is that the sought subject group is returned among the top ranks. The performance of the method is validated in a three-stage procedure. First, a gold standard is compiled from documents retrievable in the DNB online catalogue, based on metadata and on the findings of a brief physical inspection. The documents are spread over 14 of the subject groups, with a batch of 50 documents each. All documents are indexed with the extractor and the categorization results are documented. Finally, the resulting retrieval performance is assessed both for a hard (binary) categorization and for a rank-ordered return of the subject groups.
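
    The extractor described above reduces to a lookup-and-rank step: match thesaurus labels against the text, then rank the DDC subject groups attached to the matched terms. A much-simplified sketch of that step, assuming a small in-memory label-to-group mapping (the thesis builds the full thesaurus from GND/DDC data with SPARQL; the names and data below are hypothetical):

      from collections import Counter

      # Hypothetical miniature thesaurus: preferred label -> DDC subject group.
      label_to_group = {
          "normdatei": "020 Bibliotheks- und Informationswissenschaft",
          "thesaurus": "020 Bibliotheks- und Informationswissenschaft",
          "klassifikation": "020 Bibliotheks- und Informationswissenschaft",
          "sprachwissenschaft": "400 Sprache",
      }

      def rank_subject_groups(fulltext: str) -> list[tuple[str, int]]:
          # Compare token strings against thesaurus labels and rank the
          # attached DDC subject groups by number of matches in the text.
          hits = Counter()
          for token in fulltext.lower().split():
              group = label_to_group.get(token.strip(".,;:()"))
              if group:
                  hits[group] += 1
          return hits.most_common()   # sought group should appear among the top ranks

      print(rank_subject_groups("Thesaurus und Klassifikation in der Gemeinsamen Normdatei"))
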
    Content
    Master's thesis, Master of Science (Library and Information Studies) (MSc), Universität Wien. Advisor: Christoph Steiner. Cf.: https://www.researchgate.net/publication/371680244_Vergabe_von_DDC-Sachgruppen_mittels_eines_Schlagwort-Thesaurus. DOI: 10.25365/thesis.70030. See also the accompanying presentation: https://wiki.dnb.de/download/attachments/252121510/DA3%20Workshop-Gabler.pdf?version=1&modificationDate=1671093170000&api=v2.
  3. Xiong, C.: Knowledge based text representations for information retrieval (2016) 0.10
    0.10158607 = 0.31927052 × 0.3181818 coord(7/22); clause contributions in doc 5820: _text_:3a 0.0218, _text_:2f 0.0924 (×3), _text_:in 0.0050, _text_:computer 0.0121, _text_:science 0.0032
    
    Abstract
    The successes of information retrieval (IR) in recent decades were built upon bag-of-words representations. Effective as it is, bag-of-words is only a shallow text understanding; there is a limited amount of information for document ranking in the word space. This dissertation goes beyond words and builds knowledge based text representations, which embed the external and carefully curated information from knowledge bases, and provide richer and structured evidence for more advanced information retrieval systems. This thesis research first builds query representations with entities associated with the query. Entities' descriptions are used by query expansion techniques that enrich the query with explanation terms. Then we present a general framework that represents a query with entities that appear in the query, are retrieved by the query, or frequently show up in the top retrieved documents. A latent space model is developed to jointly learn the connections from query to entities and the ranking of documents, modeling the external evidence from knowledge bases and internal ranking features cooperatively. To further improve the quality of relevant entities, a defining factor of our query representations, we introduce learning to rank to entity search and retrieve better entities from knowledge bases. In the document representation part, this thesis research also moves one step forward with a bag-of-entities model, in which documents are represented by their automatic entity annotations, and the ranking is performed in the entity space.
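
    A minimal sketch of the bag-of-entities model from the last paragraph: documents are reduced to multisets of entity annotations and scored in entity space (the entity IDs and the plain frequency scoring are illustrative; the thesis learns the ranking jointly with a latent space model):

      from collections import Counter

      def bag_of_entities(annotations: list[str]) -> Counter:
          # A document is reduced to the multiset of its entity annotations.
          return Counter(annotations)

      def entity_score(query_entities: list[str], doc: Counter) -> int:
          # Score a document by how often the query's entities are annotated in it.
          return sum(doc[e] for e in query_entities)

      doc = bag_of_entities(["Q180711", "Q11471", "Q180711"])  # e.g. Wikidata entity IDs
      print(entity_score(["Q180711"], doc))                    # -> 2
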
    Content
    Submitted in partial fulfillment of the requirements for the degree of Doctor of Philosophy in Language and Information Technologies. Cf.: https://www.cs.cmu.edu/~cx/papers/knowledge_based_text_representation.pdf.
    Imprint
    Pittsburgh, PA : Carnegie Mellon University, School of Computer Science, Language Technologies Institute
  4. Huo, W.: Automatic multi-word term extraction and its application to Web-page summarization (2012) 0.09
    0.094885804 = 0.3479146 × 0.27272728 coord(6/22); clause contributions in doc 563: _text_:2f 0.0980 (×3), _text_:in 0.0056, _text_:computer 0.0182, _text_:science 0.0134 + _text_:22 0.0167
    
    Abstract
    In this thesis we propose three new word association measures for multi-word term extraction. We combine these association measures with LocalMaxs algorithm in our extraction model and compare the results of different multi-word term extraction methods. Our approach is language and domain independent and requires no training data. It can be applied to such tasks as text summarization, information retrieval, and document classification. We further explore the potential of using multi-word terms as an effective representation for general web-page summarization. We extract multi-word terms from human written summaries in a large collection of web-pages, and generate the summaries by aligning document words with these multi-word terms. Our system applies machine translation technology to learn the aligning process from a training set and focuses on selecting high quality multi-word terms from human written summaries to generate suitable results for web-page summarization.
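
    A toy sketch of the LocalMaxs acceptance test used in the extraction model above: an n-gram is kept as a multi-word term if its association score ("glue") is at least that of its contained (n-1)-grams and strictly above that of every known containing (n+1)-gram (the glue values below are made up; the thesis pairs LocalMaxs with its three new association measures):

      # Made-up association scores ("glue") for candidate n-grams.
      glue = {
          ("information",): 0.0,
          ("retrieval",): 0.0,
          ("information", "retrieval"): 0.62,
          ("information", "retrieval", "system"): 0.31,
      }

      def subgrams(ng):
          # Contiguous (n-1)-grams contained in ng.
          return [ng[:-1], ng[1:]] if len(ng) > 1 else []

      def supergrams(ng):
          # Known (n+1)-grams that contain ng.
          return [g for g in glue if len(g) == len(ng) + 1
                  and any(g[i:i + len(ng)] == ng for i in range(len(g) - len(ng) + 1))]

      def local_max(ng) -> bool:
          # Keep ng as a multi-word term if its glue is a local maximum.
          return (all(glue[ng] >= glue[s] for s in subgrams(ng) if s in glue)
                  and all(glue[ng] > glue[s] for s in supergrams(ng)))

      print(local_max(("information", "retrieval")))  # True: 0.62 beats 0.0 and 0.31
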
    Content
    A thesis presented to The University of Guelph in partial fulfilment of the requirements for the degree of Master of Science in Computer Science. Cf.: http://www.inf.ufrgs.br/~ceramisch/download_files/publications/2009/p01.pdf.
    Date
    10. 1.2013 19:22:47
  5. Farazi, M.: Faceted lightweight ontologies : a formalization and some experiments (2010) 0.09
    0.094581224 = 0.29725528 × 0.3181818 coord(7/22); clause contributions in doc 4997: _text_:3a 0.0272, _text_:2f 0.0817 (×3), _text_:in 0.0059, _text_:computer 0.0152, _text_:science 0.0039
    
    Abstract
    While classifications are heavily used to categorize web content, the evolution of the web foresees a more formal structure - ontology - which can serve this purpose. Ontologies are core artifacts of the Semantic Web which enable machines to use inference rules to conduct automated reasoning on data. Lightweight ontologies bridge the gap between classifications and ontologies. A lightweight ontology (LO) is an ontology representing a backbone taxonomy where the concept of the child node is more specific than the concept of the parent node. Formal lightweight ontologies can be generated from their informal ones. The key applications of formal lightweight ontologies are document classification, semantic search, and data integration. However, these applications suffer from the following problems: the disambiguation accuracy of the state-of-the-art NLP tools used in generating formal lightweight ontologies from their informal ones; the lack of background knowledge needed for the formal lightweight ontologies; and the limitation of ontology reuse. In this dissertation, we propose a novel solution to these problems in formal lightweight ontologies; namely, the faceted lightweight ontology (FLO). FLO is a lightweight ontology in which terms, present in each node label, and their concepts, are available in the background knowledge (BK), which is organized as a set of facets. A facet can be defined as a distinctive property of the groups of concepts that can help in differentiating one group from another. Background knowledge can be defined as a subset of a knowledge base, such as WordNet, and often represents a specific domain.
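
    A minimal sketch of the FLO condition stated above, namely that every term in a node label must denote a concept available in the faceted background knowledge (the facets and concepts below are invented for illustration):

      # Invented faceted background knowledge: facet -> set of known concepts.
      background = {
          "space":  {"mountain", "lake", "city"},
          "medium": {"book", "journal", "map"},
      }

      def is_faceted(label_terms: list[str]) -> bool:
          # FLO condition: every term in a node label must denote a concept
          # found in some facet of the background knowledge.
          return all(any(term in facet for facet in background.values())
                     for term in label_terms)

      print(is_faceted(["mountain", "map"]))    # True
      print(is_faceted(["mountain", "opera"]))  # False: "opera" is in no facet
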
    Content
    PhD dissertation at the International Doctorate School in Information and Communication Technology. Cf.: https://core.ac.uk/download/pdf/150083013.pdf.
    Imprint
    Trento : University / Department of information engineering and computer science
  6. Shala, E.: Die Autonomie des Menschen und der Maschine : gegenwärtige Definitionen von Autonomie zwischen philosophischem Hintergrund und technologischer Umsetzbarkeit (2014) 0.09
    0.09439713 = 0.2966767 × 0.3181818 coord(7/22); clause contributions in doc 4388: _text_:3a 0.0272, _text_:2f 0.0817 (×3), _text_:und 0.0097 (×2), _text_:in 0.0051
    
    Abstract
    When machines are described with concepts that originally serve to describe human beings, the suspicion immediately suggests itself that those machines possess specifically human abilities or properties. For bodily abilities that are imitated mechanically, an anthropomorphizing way of speaking has long been established in everyday language: it is hardly questioned that certain machines can weave, bake, move, or work. With non-bodily properties of a cognitive, social, or moral kind, however, things look different. That intelligent and computing machines have meanwhile found their way into everyday usage would nevertheless be unthinkable without the long-running discourse on artificial intelligence that shaped the second half of the past century in particular. Most recently it is the concept of autonomy that is increasingly used to describe new technologies, such as 'autonomous mobile robots' or 'autonomous systems'. By its name, the 'autonomy' of these technologies refers to a particular kind of technological progress that stems from the capacity for self-legislation. From a philosophical point of view, however, this raises the question of how self-legislation is defined in this case, especially since in philosophy the concept of autonomy refers to the political or moral self-legislation of humans or groups of humans and to their actions. In the Handbuch Robotik, by contrast, the author introduces the label 'autonomous' almost in passing by predicting that '[.] autonomous robots will in future even take over a large part of elderly care.'
    Footnote
    Cf.: https://www.researchgate.net/publication/271200105_Die_Autonomie_des_Menschen_und_der_Maschine_-_gegenwartige_Definitionen_von_Autonomie_zwischen_philosophischem_Hintergrund_und_technologischer_Umsetzbarkeit_Redigierte_Version_der_Magisterarbeit_Karls.
  7. Piros, A.: Az ETO-jelzetek automatikus interpretálásának és elemzésének kérdései (2018) 0.06
    0.06253818 = 0.27516797 × 0.22727273 coord(5/22); clause contributions in doc 855: _text_:3a 0.0272, _text_:2f 0.0817 (×3), _text_:in 0.0030
    
    Abstract
    Converting UDC numbers manually into a complex format such as the one mentioned above is an unrealistic expectation; supporting the construction of these representations, as automatically as possible, is a well-founded requirement. An additional advantage of this approach is that existing records could also be processed and converted. In my dissertation I also want to prove that it is possible to design and implement an algorithm that can convert pre-coordinated UDC numbers into the introduced format by identifying all of their elements and revealing their complete syntactic structure. I will discuss a feasible way of building a UDC-specific XML schema for describing the most detailed and complicated UDC numbers (containing not only the common auxiliary signs and numbers, but also the different types of special auxiliaries). The schema definition is available online at: http://piros.udc-interpreter.hu#xsd. The primary goal of my research is to prove that it is possible to support building, retrieving, and analyzing UDC numbers without compromise, capturing the whole syntactic richness of the scheme by storing UDC numbers in a way that preserves the meaning of pre-coordination. The research also included the implementation of software that parses UDC classmarks, intended to show that such a solution can be applied automatically, without any additional effort, and even retrospectively to existing collections.
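
    An illustrative tokenizer in the spirit of the classmark parser described above; the dissertation defines a complete XML schema and handles far more element types, so the patterns below are a hypothetical fragment only:

      import re

      # Hypothetical fragment of a UDC classmark tokenizer.
      TOKEN = re.compile(r"""
          (?P<number>\d+(?:\.\d+)*)         # main number, e.g. 821.511.141
        | (?P<connector>[+/:])              # coordination/relation signs
        | (?P<place>\(\d+(?:\.\d+)*\))      # common auxiliary of place, e.g. (439)
        | (?P<time>"[^"]*")                 # common auxiliary of time, e.g. "19"
        | (?P<language>=\d+(?:\.\d+)*)      # common auxiliary of language
      """, re.VERBOSE)

      def parse_udc(classmark: str) -> list[tuple[str, str]]:
          # Split a pre-coordinated UDC number into typed elements.
          return [(m.lastgroup, m.group()) for m in TOKEN.finditer(classmark)]

      print(parse_udc('821.511.141(439)"19"'))
      # [('number', '821.511.141'), ('place', '(439)'), ('time', '"19"')]
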
    Content
    See also: New automatic interpreter for complex UDC numbers, at: <https://udcc.org/files/AttilaPiros_EC_36-37_2014-2015.pdf>.
  8. Stojanovic, N.: Ontology-based Information Retrieval : methods and tools for cooperative query answering (2005) 0.05
    0.050920095 = 0.22404842 × 0.22727273 coord(5/22); clause contributions in doc 701: _text_:3a 0.0218, _text_:2f 0.0653 (×3), _text_:in 0.0063
    
    Abstract
    With the explosion of possibilities for ubiquitous content production, the information overload problem has reached a level of complexity that can no longer be managed by traditional modelling approaches. Because of their purely syntactic nature, traditional information retrieval approaches have not succeeded in treating content itself (i.e. its meaning, not its representation), which makes the results of a retrieval process of very limited use for the user's task at hand. In the last ten years, ontologies have evolved from an interesting conceptualisation paradigm into a very promising (semantic) modelling technology, especially in the context of the Semantic Web. From the information retrieval point of view, ontologies enable a machine-understandable form of content description, such that the retrieval process can be driven by the meaning of the content. However, the very ambiguous nature of the retrieval process, in which a user who is unfamiliar with the underlying repository and/or query syntax only approximates his information need in a query, makes it necessary to involve the user more actively in the retrieval process in order to close the gap between the meaning of the content and the meaning of the user's query (i.e. his information need). This thesis lays the foundation for such an ontology-based interactive retrieval process, in which the retrieval system interacts with the user in order to interpret the meaning of his query conceptually, while the underlying domain ontology drives the conceptualisation process. In that way the retrieval process evolves from a query evaluation process into a highly interactive cooperation between the user and the retrieval system, in which the system tries to anticipate the user's information need and to deliver the relevant content proactively. Moreover, the notion of content relevance for a user's query evolves from a content-dependent artefact into a multidimensional, context-dependent structure strongly influenced by the user's preferences. This cooperation process is realized as the so-called Librarian Agent Query Refinement Process. In order to clarify the impact of an ontology on the retrieval process (regarding its complexity and quality), a set of methods and tools for different levels of content and query formalisation is developed, ranging from pure ontology-based inferencing to keyword-based querying in which semantics automatically emerges from the results. Our evaluation studies have shown that the ability to conceptualise a user's information need in the right manner and to interpret the retrieval results accordingly is a key issue in realising much more meaningful information retrieval systems.
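
    A minimal sketch of the ontology-driven step behind the Librarian Agent Query Refinement Process described above: for each query concept, more specific concepts from the domain ontology are offered as refinement candidates (the toy ontology is ours, and the real process additionally ranks candidates against the user's inferred information need):

      # Toy domain ontology: concept -> more specific concepts.
      ontology = {
          "vehicle": ["car", "bicycle"],
          "car": ["electric car", "sports car"],
      }

      def refinement_candidates(query_terms: list[str]) -> list[str]:
          # One refinement step: offer more specific ontology concepts
          # for each query term, for the user to choose from.
          candidates = []
          for term in query_terms:
              candidates.extend(ontology.get(term, []))
          return candidates

      print(refinement_candidates(["car"]))  # ['electric car', 'sports car']
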
    Content
    Cf.: http://digbib.ubka.uni-karlsruhe.de/volltexte/documents/1627.
  9. Parsian, D.: Überlegungen zur Aufstellungssystematik und Reklassifikation an der Fachbereichsbibliothek Afrikawissenschaften und Orientalistik (2007) 0.04
    0.040998373 = 0.15032737 × 0.27272728 coord(6/22); clause contributions in doc 3396: _text_:c3 0.0360 (×3), _text_:und 0.0193 (×2), _text_:in 0.0036
    
    Abstract
    The practical use of the Dewey Decimal Classification (DDC) for subject indexing and as a shelving classification in academic libraries of the German-speaking world has little tradition and has so far hardly been treated in the literature. After describing the general conditions and the problem situation at the Fachbereichsbibliothek Afrikanistik/Orientalistik of the University of Vienna, the author surveys the experience with, and the assessment of, DDC in comparable academic libraries, above all in the German- and English-speaking worlds, defines criteria for a new classification scheme, and clarifies to what extent these criteria can be met by using DDC. Starting from the quantitative and spatial conditions and from a segmentation of the holdings with regard to the requirements of reclassification, and drawing on his own experience and on plausibility checks, the author uses three scenarios to estimate the personnel and time needed to introduce DDC in a reclassification project. The thesis closes with practical experience in handling DDC, using the subject area 'Islamic studies' as an example to point out some peculiarities and problems in using DDC for reclassification.
    Footnote
    Cf.: http://othes.univie.ac.at/3016/1/Parsian_%C3%9Cberlegungen_zur_Aufstellungssystematik_und_Reklassifikation_an_der_AFOR.pdf.
  10. Bender, B.: Digitale Mündigkeit? : Definition und Dimensionen: eine theoretische Fundierung zwischen Medienbildung und Medienkompetenz (2016) 0.04
    0.03555391 = 0.13036433 × 0.27272728 coord(6/22); clause contributions in doc 4089: _text_:c3 0.0288 (×3), _text_:und 0.0189 (×2), _text_:in 0.0061
    
    Abstract
    This thesis investigates the question 'What is digital maturity (digitale Mündigkeit)?'. The aim was to uncover the features and dimensions of digital maturity on the basis of the literature and to condense the findings into a first definition. This was done as follows: the introduction first outlines how the research topic was chosen, in what way digital maturity is changing society, and which areas of everyday life the concept touches; from this the research question is derived. The second chapter sets out the theoretical research method, which in this case rests on a literature search that was first broad and then narrowed down, complemented by the method of philosophical concept analysis to be used for the later definition. Chapter 3 introduces the technical terms and relevant concepts, followed by an account of the significance of big data, code, and algorithms for the individual and for society, illustrated by topics taken from editorial articles. The current state of research is then presented, though mainly to provide an empirical overview, not to extract data from these studies (the data were obtained from the literature). After that, digital maturity is discussed as a field of media pedagogy. Chapter 4 presents relevant theories of maturity; the extracts from the various approaches are summarized in a concluding subsection for the later definition. Chapter 5 examines how digital maturity relates to the concepts of media competence (Medienkompetenz) and media education (Medienbildung): first the development of and approaches to the concept of media competence are explained, then the same is done for media education; after an account of the long-standing debate in educational science about media competence and/or media education, the relations between digital maturity, media competence, and media education are considered. All this preparatory work converges in the sixth chapter, in which first the concept of maturity in general is analysed and then the component 'digital' is examined. The threads tied in the preceding chapters finally come together in a well-founded definition of digital maturity. The main part ends with a look at related concepts.
    Content
    Bachelor's thesis, written in the B.A. Bildungswissenschaft program at the FernUniversität in Hagen - Fakultät Kultur- und Sozialwissenschaften - Institut für Bildungswissenschaft und Medienforschung, Lehrgebiet Bildungstheorie und Medienpädagogik. Cf.: https://lesen-schreiben-bilden.net/wp-content/uploads/2017/02/BA_BB_ohnePr%C3%A4si_public.pdf.
    Imprint
    Hagen : FernUniversität / Fakultät Kultur- und Sozialwissenschaften / Institut für Bildungswissenschaft und Medienforschung
  11. Küchler, J.: ¬Die Sacherschließung auf der Grundlage der Regensburger Aufstellungssystematiken in einer wissenschaftlichen Spezialbibliothek : dargestellt am Beispiel der Fachbibliothek Informatik der UB Dresden (1993) 0.03
    0.026679449 = product of:
      0.11738957 = sum of:
        0.059156876 = weight(_text_:informatik in 5916) [ClassicSimilarity], result of:
          0.059156876 = score(doc=5916,freq=2.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.563749 = fieldWeight in 5916, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.078125 = fieldNorm(doc=5916)
        0.014058149 = product of:
          0.028116299 = sum of:
            0.028116299 = weight(_text_:29 in 5916) [ClassicSimilarity], result of:
              0.028116299 = score(doc=5916,freq=2.0), product of:
                0.072342895 = queryWeight, product of:
                  3.5176873 = idf(docFreq=3565, maxDocs=44218)
                  0.02056547 = queryNorm
                0.38865322 = fieldWeight in 5916, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  3.5176873 = idf(docFreq=3565, maxDocs=44218)
                  0.078125 = fieldNorm(doc=5916)
          0.5 = coord(1/2)
        0.0059456457 = weight(_text_:in in 5916) [ClassicSimilarity], result of:
          0.0059456457 = score(doc=5916,freq=4.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.21253976 = fieldWeight in 5916, product of:
              2.0 = tf(freq=4.0), with freq of:
                4.0 = termFreq=4.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.078125 = fieldNorm(doc=5916)
        0.030346027 = weight(_text_:computer in 5916) [ClassicSimilarity], result of:
          0.030346027 = score(doc=5916,freq=2.0), product of:
            0.0751567 = queryWeight, product of:
              3.6545093 = idf(docFreq=3109, maxDocs=44218)
              0.02056547 = queryNorm
            0.40377006 = fieldWeight in 5916, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              3.6545093 = idf(docFreq=3109, maxDocs=44218)
              0.078125 = fieldNorm(doc=5916)
        0.0078828735 = product of:
          0.015765747 = sum of:
            0.015765747 = weight(_text_:science in 5916) [ClassicSimilarity], result of:
              0.015765747 = score(doc=5916,freq=2.0), product of:
                0.0541719 = queryWeight, product of:
                  2.6341193 = idf(docFreq=8627, maxDocs=44218)
                  0.02056547 = queryNorm
                0.2910318 = fieldWeight in 5916, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  2.6341193 = idf(docFreq=8627, maxDocs=44218)
                  0.078125 = fieldNorm(doc=5916)
          0.5 = coord(1/2)
      0.22727273 = coord(5/22)
    
    Abstract
    The thesis shows how the classification scheme of the Regensburg university library (Regensburger Aufstellungssystematik) can be applied in the special library for computer science of the Technical University of Dresden.
    Date
    6.12.1996 19:56:29
  12. Lehrke, C.: Architektur von Suchmaschinen : Googles Architektur, insb. Crawler und Indizierer (2005) 0.02
    0.022731889 = product of:
      0.083350256 = sum of:
        0.029578438 = weight(_text_:informatik in 867) [ClassicSimilarity], result of:
          0.029578438 = score(doc=867,freq=2.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.2818745 = fieldWeight in 867, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.0390625 = fieldNorm(doc=867)
        0.014765427 = weight(_text_:und in 867) [ClassicSimilarity], result of:
          0.014765427 = score(doc=867,freq=14.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.32394084 = fieldWeight in 867, product of:
              3.7416575 = tf(freq=14.0), with freq of:
                14.0 = termFreq=14.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0390625 = fieldNorm(doc=867)
        0.014765427 = weight(_text_:und in 867) [ClassicSimilarity], result of:
          0.014765427 = score(doc=867,freq=14.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.32394084 = fieldWeight in 867, product of:
              3.7416575 = tf(freq=14.0), with freq of:
                14.0 = termFreq=14.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0390625 = fieldNorm(doc=867)
        0.0021021033 = weight(_text_:in in 867) [ClassicSimilarity], result of:
          0.0021021033 = score(doc=867,freq=2.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.07514416 = fieldWeight in 867, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.0390625 = fieldNorm(doc=867)
        0.015173013 = weight(_text_:computer in 867) [ClassicSimilarity], result of:
          0.015173013 = score(doc=867,freq=2.0), product of:
            0.0751567 = queryWeight, product of:
              3.6545093 = idf(docFreq=3109, maxDocs=44218)
              0.02056547 = queryNorm
            0.20188503 = fieldWeight in 867, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              3.6545093 = idf(docFreq=3109, maxDocs=44218)
              0.0390625 = fieldNorm(doc=867)
        0.0069658435 = product of:
          0.013931687 = sum of:
            0.013931687 = weight(_text_:22 in 867) [ClassicSimilarity], result of:
              0.013931687 = score(doc=867,freq=2.0), product of:
                0.072016776 = queryWeight, product of:
                  3.5018296 = idf(docFreq=3622, maxDocs=44218)
                  0.02056547 = queryNorm
                0.19345059 = fieldWeight in 867, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  3.5018296 = idf(docFreq=3622, maxDocs=44218)
                  0.0390625 = fieldNorm(doc=867)
          0.5 = coord(1/2)
      0.27272728 = coord(6/22)
    
    Abstract
    The Internet, with its constant influx of new users and its extreme growth, poses many new challenges. Because of this growth, most people rely on search engines to find content on the Internet. To answer user queries, search engines employ information retrieval techniques. The problem is that traditional information retrieval (IR) systems were developed for relatively small, coherent document collections, whereas the Internet grows continuously, changes rapidly, and is spread across geographically distributed computers. The old techniques must therefore be extended, or entirely new IR techniques developed. One search engine that has met these challenges comparatively successfully is Google. The aim of this paper is to show how search engines work, with the focus on Google. Chapter 2 first covers the architecture of search engines in general, to establish a basic understanding of the individual components, and then builds on this with an overview of Google's architecture. Chapters 3 and 4 examine in more detail the crawler and the indexer, two central components of any search engine.
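    To make this concrete, the following is a minimal sketch (Python) of the crawler/indexer split described above: a breadth-first crawler collects raw pages, and a toy indexer builds an inverted index over them. This is an illustration under simplifying assumptions, not Google's actual design; URL normalization, robots.txt handling, politeness delays, distributed storage, and ranking are all omitted, and the seed URL in the usage example is a placeholder.

    # A minimal breadth-first crawler plus toy indexer (illustrative only).
    from collections import deque
    from html.parser import HTMLParser
    from urllib.parse import urljoin, urldefrag
    from urllib.request import urlopen

    class LinkExtractor(HTMLParser):
        """Collects href targets of <a> tags while parsing HTML."""
        def __init__(self):
            super().__init__()
            self.links = []
        def handle_starttag(self, tag, attrs):
            if tag == "a":
                for name, value in attrs:
                    if name == "href" and value:
                        self.links.append(value)

    def crawl(seed_url, max_pages=10):
        """Breadth-first crawl from seed_url; returns {url: raw_html}."""
        frontier = deque([seed_url])   # URL frontier (queue of pages to fetch)
        seen = {seed_url}              # prevents re-fetching the same URL
        corpus = {}
        while frontier and len(corpus) < max_pages:
            url = frontier.popleft()
            try:
                html = urlopen(url, timeout=5).read().decode("utf-8", "replace")
            except (OSError, ValueError):
                continue               # unreachable or malformed URLs are skipped
            corpus[url] = html         # hand the raw document to the indexer
            extractor = LinkExtractor()
            extractor.feed(html)
            for link in extractor.links:
                absolute, _ = urldefrag(urljoin(url, link))  # resolve, drop #fragment
                if absolute.startswith("http") and absolute not in seen:
                    seen.add(absolute)
                    frontier.append(absolute)
        return corpus

    def build_index(corpus):
        """Toy indexer: maps each lower-cased token to the set of URLs containing it."""
        index = {}
        for url, html in corpus.items():
            for token in html.lower().split():
                index.setdefault(token, set()).add(url)
        return index

    # Example (placeholder seed URL):
    #   pages = crawl("https://example.org/")
    #   index = build_index(pages)

    A production engine replaces the whitespace tokenizer with real parsing, stemming, and link-aware ranking; those crawler and indexer internals are what Chapters 3 and 4 of the paper examine for Google.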
    Content
    Term paper written for the seminar "Suchmaschinen und Suchalgorithmen", Institut für Wirtschaftsinformatik / Praktische Informatik in der Wirtschaft, Westfälische Wilhelms-Universität Münster. - Cf.: http://www-wi.uni-muenster.de/pi/lehre/ss05/seminarSuchen/Ausarbeitungen/ChristophLehrke.pdf
    Pages
    22 S
  13. Tinschert, H.: Untersuchungen zum Informationsverhalten von Studenten am Schlagwortkatalog der Staats- und Universitätsbibliothek Hamburg (1974) 0.02
    0.022162251 = product of:
      0.16252317 = sum of:
        0.015626261 = weight(_text_:und in 1588) [ClassicSimilarity], result of:
          0.015626261 = score(doc=1588,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.34282678 = fieldWeight in 1588, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.109375 = fieldNorm(doc=1588)
        0.13127063 = weight(_text_:schriftenreihe in 1588) [ClassicSimilarity], result of:
          0.13127063 = score(doc=1588,freq=2.0), product of:
            0.13211027 = queryWeight, product of:
              6.4238877 = idf(docFreq=194, maxDocs=44218)
              0.02056547 = queryNorm
            0.9936445 = fieldWeight in 1588, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              6.4238877 = idf(docFreq=194, maxDocs=44218)
              0.109375 = fieldNorm(doc=1588)
        0.015626261 = weight(_text_:und in 1588) [ClassicSimilarity], result of:
          0.015626261 = score(doc=1588,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.34282678 = fieldWeight in 1588, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.109375 = fieldNorm(doc=1588)
      0.13636364 = coord(3/22)
    
    Series
    Schriftenreihe der Bibliothekar-Lehrinstitute: Reihe A: Examensarbeiten; H.17/18
  14. Külper, U.; Will, G.: ¬Das Projekt Bücherschatz : interdisziplinäre und partizipative Entwicklung eines kindgerechten Bibliotheks-Online-Kataloges (1996) 0.02
    0.020702327 = product of:
      0.11386279 = sum of:
        0.05856232 = weight(_text_:informatik in 4725) [ClassicSimilarity], result of:
          0.05856232 = score(doc=4725,freq=4.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.55808306 = fieldWeight in 4725, product of:
              2.0 = tf(freq=4.0), with freq of:
                4.0 = termFreq=4.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.0546875 = fieldNorm(doc=4725)
        0.024707288 = weight(_text_:und in 4725) [ClassicSimilarity], result of:
          0.024707288 = score(doc=4725,freq=20.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.54205674 = fieldWeight in 4725, product of:
              4.472136 = tf(freq=20.0), with freq of:
                20.0 = termFreq=20.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0546875 = fieldNorm(doc=4725)
        0.024707288 = weight(_text_:und in 4725) [ClassicSimilarity], result of:
          0.024707288 = score(doc=4725,freq=20.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.54205674 = fieldWeight in 4725, product of:
              4.472136 = tf(freq=20.0), with freq of:
                20.0 = termFreq=20.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0546875 = fieldNorm(doc=4725)
        0.0058858884 = weight(_text_:in in 4725) [ClassicSimilarity], result of:
          0.0058858884 = score(doc=4725,freq=8.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.21040362 = fieldWeight in 4725, product of:
              2.828427 = tf(freq=8.0), with freq of:
                8.0 = termFreq=8.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.0546875 = fieldNorm(doc=4725)
      0.18181819 = coord(4/22)
    
    Abstract
    In 1995 the prototype Bücherschatz, a library online catalog for children, was created in an interdisciplinary collaboration between students and a professor of the FH Hamburg, Fachbereich Bibliothek und Information, a designer, and two computer scientists from the Universität Hamburg. This report describes both the product Bücherschatz and the process of its development. One focus is the engagement with theoretical models of software engineering, here STEPS and prototyping, and their adaptation to the concrete requirements of the project. The report also addresses the design of child-appropriate software, the organization of a large project team, and the forms of user participation. The project as a whole is placed in the scientific context of computer science, and central experiences and insights concerning interdisciplinary and participatory software development are summarized.
    Imprint
    Hamburg : Universität, FB Informatik
  15. Ernst, S.: Aspekte der Modellierung und Visualisierung von XML Topic Maps : die Entwicklung von Stylesheets zur Darstellung von Topic Maps in SVG (2004) 0.02
    0.019435707 = product of:
      0.10689639 = sum of:
        0.013393938 = weight(_text_:und in 2216) [ClassicSimilarity], result of:
          0.013393938 = score(doc=2216,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.29385152 = fieldWeight in 2216, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.09375 = fieldNorm(doc=2216)
        0.013393938 = weight(_text_:und in 2216) [ClassicSimilarity], result of:
          0.013393938 = score(doc=2216,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.29385152 = fieldWeight in 2216, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.09375 = fieldNorm(doc=2216)
        0.07506347 = product of:
          0.15012693 = sum of:
            0.15012693 = weight(_text_:sign in 2216) [ClassicSimilarity], result of:
              0.15012693 = score(doc=2216,freq=2.0), product of:
                0.15260035 = queryWeight, product of:
                  7.4202213 = idf(docFreq=71, maxDocs=44218)
                  0.02056547 = queryNorm
                0.9837916 = fieldWeight in 2216, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  7.4202213 = idf(docFreq=71, maxDocs=44218)
                  0.09375 = fieldNorm(doc=2216)
          0.5 = coord(1/2)
        0.0050450475 = weight(_text_:in in 2216) [ClassicSimilarity], result of:
          0.0050450475 = score(doc=2216,freq=2.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.18034597 = fieldWeight in 2216, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.09375 = fieldNorm(doc=2216)
      0.18181819 = coord(4/22)
    
    Footnote
    In the holdings of the UB Bielefeld (Sign.: NA188.00 E71)
  16. Nagelschmidt, M.: Integration und Anwendung von "Semantic Web"-Technologien im betrieblichen Wissensmanagement (2012) 0.02
    0.019251987 = product of:
      0.08470874 = sum of:
        0.04183023 = weight(_text_:informatik in 11) [ClassicSimilarity], result of:
          0.04183023 = score(doc=11,freq=4.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.39863077 = fieldWeight in 11, product of:
              2.0 = tf(freq=4.0), with freq of:
                4.0 = termFreq=4.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.0390625 = fieldNorm(doc=11)
        0.017648064 = weight(_text_:und in 11) [ClassicSimilarity], result of:
          0.017648064 = score(doc=11,freq=20.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.3871834 = fieldWeight in 11, product of:
              4.472136 = tf(freq=20.0), with freq of:
                20.0 = termFreq=20.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0390625 = fieldNorm(doc=11)
        0.017648064 = weight(_text_:und in 11) [ClassicSimilarity], result of:
          0.017648064 = score(doc=11,freq=20.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.3871834 = fieldWeight in 11, product of:
              4.472136 = tf(freq=20.0), with freq of:
                20.0 = termFreq=20.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0390625 = fieldNorm(doc=11)
        0.00364095 = weight(_text_:in in 11) [ClassicSimilarity], result of:
          0.00364095 = score(doc=11,freq=6.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.1301535 = fieldWeight in 11, product of:
              2.4494898 = tf(freq=6.0), with freq of:
                6.0 = termFreq=6.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.0390625 = fieldNorm(doc=11)
        0.0039414368 = product of:
          0.0078828735 = sum of:
            0.0078828735 = weight(_text_:science in 11) [ClassicSimilarity], result of:
              0.0078828735 = score(doc=11,freq=2.0), product of:
                0.0541719 = queryWeight, product of:
                  2.6341193 = idf(docFreq=8627, maxDocs=44218)
                  0.02056547 = queryNorm
                0.1455159 = fieldWeight in 11, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  2.6341193 = idf(docFreq=8627, maxDocs=44218)
                  0.0390625 = fieldNorm(doc=11)
          0.5 = coord(1/2)
      0.22727273 = coord(5/22)
    
    Abstract
    Knowledge management is a topic with numerous disciplinary ties, in particular to business informatics and to management, personnel, and organization studies as subfields of business administration. Understood more broadly, it also connects to organizational psychology, computer science, and information science. Developments in these reference disciplines can therefore provide impulses for the concepts, methods, and technologies of knowledge management. The idea, originating in computer science, of extending the World Wide Web (WWW) into a semantic network can be seen as one such impulse. Over the past decade this idea has reached a sufficient degree of maturity that its potential relevance for knowledge management may be assumed. This thesis uses a concrete conceptual approach to demonstrate how this technological impulse can be channeled productively into knowledge management. Such a goal first requires an operational understanding of knowledge management on which the further discussion can build. The thesis also introduces the architecture and operation of the Semantic Web, together with XML and the ontology languages RDF/RDFS and OWL as the principal tools for ontology-based knowledge representation. It then presents an approach for integrating and applying these semantic technologies in knowledge management, describing a largely automated knowledge-modeling process followed by semantic indexing of the company's data, illustrated with a fictitious example world from the manufacturing industry. Finally, the benefit of this approach is illustrated through information retrieval (IR) application scenarios in the context of business processes.
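    As a pointer to what ontology-based representation looks like in practice, here is a minimal sketch using Python's rdflib. The ex: namespace and all class, property, and instance names are invented stand-ins for the thesis's fictitious manufacturing world, not taken from the original work.

    # Minimal sketch of ontology-based knowledge representation with rdflib.
    # All names below are illustrative placeholders, not the thesis's model.
    from rdflib import Graph, Literal, Namespace, RDF, RDFS

    EX = Namespace("http://example.org/factory#")
    g = Graph()
    g.bind("ex", EX)

    # A tiny schema: machines form a class, and each machine has an operator.
    g.add((EX.Machine, RDF.type, RDFS.Class))
    g.add((EX.operatedBy, RDF.type, RDF.Property))
    g.add((EX.operatedBy, RDFS.domain, EX.Machine))

    # Instance data (the "company data basis" being semantically indexed).
    g.add((EX.press01, RDF.type, EX.Machine))
    g.add((EX.press01, EX.operatedBy, Literal("J. Weber")))
    g.add((EX.press01, RDFS.label, Literal("Hydraulic press 01")))

    # Semantic retrieval: a SPARQL query over the modeled knowledge.
    query = """
        SELECT ?machine ?operator WHERE {
            ?machine a ex:Machine ;
                     ex:operatedBy ?operator .
        }
    """
    for machine, operator in g.query(query, initNs={"ex": EX}):
        print(machine, "is operated by", operator)

    The same graph could be serialized to Turtle or RDF/XML, and OWL constructs (class restrictions, property characteristics) would be layered on analogously via the OWL namespace.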
    Content
    Master's thesis in the continuing-education master's program "Hagener Masterstudium Management" for the degree of Master of Science
  17. Renker, L.: Exploration von Textkorpora : Topic Models als Grundlage der Interaktion (2015) 0.02
    0.018698731 = product of:
      0.082274415 = sum of:
        0.04183023 = weight(_text_:informatik in 2380) [ClassicSimilarity], result of:
          0.04183023 = score(doc=2380,freq=4.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.39863077 = fieldWeight in 2380, product of:
              2.0 = tf(freq=4.0), with freq of:
                4.0 = termFreq=4.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.0390625 = fieldNorm(doc=2380)
        0.014765427 = weight(_text_:und in 2380) [ClassicSimilarity], result of:
          0.014765427 = score(doc=2380,freq=14.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.32394084 = fieldWeight in 2380, product of:
              3.7416575 = tf(freq=14.0), with freq of:
                14.0 = termFreq=14.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0390625 = fieldNorm(doc=2380)
        0.014765427 = weight(_text_:und in 2380) [ClassicSimilarity], result of:
          0.014765427 = score(doc=2380,freq=14.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.32394084 = fieldWeight in 2380, product of:
              3.7416575 = tf(freq=14.0), with freq of:
                14.0 = termFreq=14.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0390625 = fieldNorm(doc=2380)
        0.006971888 = weight(_text_:in in 2380) [ClassicSimilarity], result of:
          0.006971888 = score(doc=2380,freq=22.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.24922498 = fieldWeight in 2380, product of:
              4.690416 = tf(freq=22.0), with freq of:
                22.0 = termFreq=22.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.0390625 = fieldNorm(doc=2380)
        0.0039414368 = product of:
          0.0078828735 = sum of:
            0.0078828735 = weight(_text_:science in 2380) [ClassicSimilarity], result of:
              0.0078828735 = score(doc=2380,freq=2.0), product of:
                0.0541719 = queryWeight, product of:
                  2.6341193 = idf(docFreq=8627, maxDocs=44218)
                  0.02056547 = queryNorm
                0.1455159 = fieldWeight in 2380, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  2.6341193 = idf(docFreq=8627, maxDocs=44218)
                  0.0390625 = fieldNorm(doc=2380)
          0.5 = coord(1/2)
      0.22727273 = coord(5/22)
    
    Abstract
    The Internet holds virtually endless information; a central problem today is making it accessible. Formulating the right queries in a full-text search requires fundamental domain knowledge, which is often lacking, so much time must be spent simply gaining an overview of the topic at hand. In such situations the user is engaged in an exploratory search, working toward a topic step by step. Machine-learning methods are now used as a matter of course for organizing data, but in most cases they remain invisible to the user. Using them interactively in exploratory search processes could couple human judgment more closely with the machine processing of large data volumes. Topic models are exactly such a method: they discover latent themes in a text corpus that humans can interpret relatively well, which makes them promising for exploratory search, where they can support users in making sense of unfamiliar sources. A review of the relevant research showed that topic models are used predominantly to produce static visualizations. Sensemaking, although an essential part of exploratory search, is drawn on only to a very limited extent to motivate algorithmic innovations and to place them in a broader context. This suggests that using models of sensemaking and user-centered design of exploratory search can yield new forms of interaction with topic models and provide a framing for future research.
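    To illustrate the kind of model the thesis builds on, here is a minimal topic-model sketch using scikit-learn's LDA implementation; the four-document corpus and the choice of library are illustrative assumptions, not part of the thesis.

    # Minimal topic-model sketch with scikit-learn's LDA (illustrative only).
    from sklearn.decomposition import LatentDirichletAllocation
    from sklearn.feature_extraction.text import CountVectorizer

    corpus = [
        "library catalog search retrieval index",
        "search engine crawler index ranking",
        "children library catalog reading books",
        "topic model corpus latent themes",
    ]

    # Bag-of-words term-document matrix, the usual LDA input.
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)

    # Fit a two-topic model; n_components would be tuned on a real corpus.
    lda = LatentDirichletAllocation(n_components=2, random_state=0)
    doc_topics = lda.fit_transform(X)   # per-document topic mixtures

    # The interpretable artifact: top terms per discovered topic.
    terms = vectorizer.get_feature_names_out()
    for k, weights in enumerate(lda.components_):
        top = weights.argsort()[::-1][:4]
        print(f"topic {k}:", ", ".join(terms[i] for i in top))

    The printed top terms per topic are the "relatively well interpretable" artifact the abstract refers to; an interactive exploration tool would additionally expose doc_topics, the per-document topic mixtures, to the user.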
    Footnote
    Master's thesis for the degree of Master of Science (M.Sc.), submitted to the Fachhochschule Köln / Fakultät für Informatik und Ingenieurswissenschaften in the Medieninformatik program.
    Imprint
    Gummersbach : Fakultät für Informatik und Ingenieurswissenschaften
    Theme
    Semantisches Umfeld in Indexierung u. Retrieval
  18. Schneider, W.: ¬Ein verteiltes Bibliotheks-Informationssystem auf Basis des Z39.50 Protokolls (1999) 0.02
    0.018694572 = product of:
      0.082256116 = sum of:
        0.0473255 = weight(_text_:informatik in 4773) [ClassicSimilarity], result of:
          0.0473255 = score(doc=4773,freq=2.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.4509992 = fieldWeight in 4773, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.0625 = fieldNorm(doc=4773)
        0.008929292 = weight(_text_:und in 4773) [ClassicSimilarity], result of:
          0.008929292 = score(doc=4773,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.19590102 = fieldWeight in 4773, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0625 = fieldNorm(doc=4773)
        0.008929292 = weight(_text_:und in 4773) [ClassicSimilarity], result of:
          0.008929292 = score(doc=4773,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.19590102 = fieldWeight in 4773, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.0625 = fieldNorm(doc=4773)
        0.011246519 = product of:
          0.022493038 = sum of:
            0.022493038 = weight(_text_:29 in 4773) [ClassicSimilarity], result of:
              0.022493038 = score(doc=4773,freq=2.0), product of:
                0.072342895 = queryWeight, product of:
                  3.5176873 = idf(docFreq=3565, maxDocs=44218)
                  0.02056547 = queryNorm
                0.31092256 = fieldWeight in 4773, product of:
                  1.4142135 = tf(freq=2.0), with freq of:
                    2.0 = termFreq=2.0
                  3.5176873 = idf(docFreq=3565, maxDocs=44218)
                  0.0625 = fieldNorm(doc=4773)
          0.5 = coord(1/2)
        0.0058255196 = weight(_text_:in in 4773) [ClassicSimilarity], result of:
          0.0058255196 = score(doc=4773,freq=6.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.2082456 = fieldWeight in 4773, product of:
              2.4494898 = tf(freq=6.0), with freq of:
                6.0 = termFreq=6.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.0625 = fieldNorm(doc=4773)
      0.22727273 = coord(5/22)
    
    Abstract
    This diploma thesis describes ZACK, a distributed library information system for bibliographic databases on the Internet. With ZACK, a user can search one or more bibliographic databases for a document and import the hits into the local database. Importing records from an external database greatly eases the cataloging of new documents, since original cataloging can be reduced to a minimum. In a distributed search, ZACK queries several databases in parallel, and duplicates are recognized as such.
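    A rough sketch of the parallel, federated search that ZACK performs follows (Python). The hosts are placeholders and z3950_search is a stub standing in for a real Z39.50 client call; the thesis does not prescribe this structure, and ZACK's duplicate detection is more sophisticated than the naive title match used here.

    # Sketch of ZACK-style parallel search across several bibliographic targets.
    from concurrent.futures import ThreadPoolExecutor

    TARGETS = ["catalog-a.example.org", "catalog-b.example.org"]  # hypothetical hosts

    def z3950_search(target, query):
        """Placeholder: send a Z39.50 SearchRequest to `target`, return hit records.
        Records are assumed to be dicts with a 'title' field in this sketch."""
        return []  # a real client would return bibliographic records here

    def distributed_search(query):
        # Query all targets in parallel, as ZACK does for its federated search.
        with ThreadPoolExecutor(max_workers=len(TARGETS)) as pool:
            result_sets = pool.map(lambda t: z3950_search(t, query), TARGETS)

        # Merge and de-duplicate: records with the same normalized title are
        # treated as duplicates (ZACK's actual matching is more sophisticated).
        merged = {}
        for records in result_sets:
            for record in records:
                key = record.get("title", "").strip().lower()
                merged.setdefault(key, record)
        return list(merged.values())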
    Date
    17.5.2000 20:29:41
    Imprint
    Berlin : TU Berlin, FB Informatik
  19. Mertens, T.: Vergleich von Archiv- und Dokumentenmanagementsystemen für die betriebliche Anwendung (2000) 0.02
    0.018555671 = product of:
      0.10205619 = sum of:
        0.05019627 = weight(_text_:informatik in 4651) [ClassicSimilarity], result of:
          0.05019627 = score(doc=4651,freq=4.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.4783569 = fieldWeight in 4651, product of:
              2.0 = tf(freq=4.0), with freq of:
                4.0 = termFreq=4.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.046875 = fieldNorm(doc=4651)
        0.024146264 = weight(_text_:und in 4651) [ClassicSimilarity], result of:
          0.024146264 = score(doc=4651,freq=26.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.5297484 = fieldWeight in 4651, product of:
              5.0990195 = tf(freq=26.0), with freq of:
                26.0 = termFreq=26.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.046875 = fieldNorm(doc=4651)
        0.024146264 = weight(_text_:und in 4651) [ClassicSimilarity], result of:
          0.024146264 = score(doc=4651,freq=26.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.5297484 = fieldWeight in 4651, product of:
              5.0990195 = tf(freq=26.0), with freq of:
                26.0 = termFreq=26.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.046875 = fieldNorm(doc=4651)
        0.0035673876 = weight(_text_:in in 4651) [ClassicSimilarity], result of:
          0.0035673876 = score(doc=4651,freq=4.0), product of:
            0.027974274 = queryWeight, product of:
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.02056547 = queryNorm
            0.12752387 = fieldWeight in 4651, product of:
              2.0 = tf(freq=4.0), with freq of:
                4.0 = termFreq=4.0
              1.3602545 = idf(docFreq=30841, maxDocs=44218)
              0.046875 = fieldNorm(doc=4651)
      0.18181819 = coord(4/22)
    
    Abstract
    The aim of this thesis is to examine the functional and non-functional aspects of document management and archive systems; the non-functional aspects include, for example, the user interface, architecture, interoperability, and ease of integration. These aspects are classified, prioritized, and rated, taking into account the relationships between them, such as substitutability, complementarity, and competition. The result of this examination is a so-called goal hierarchy. A market analysis then compares various document management and archive systems: an evaluation method based on the goal hierarchy is developed and applied to the set of systems under review. The outcome of this market analysis is a description of document management and archive systems and the identification of functional and non-functional aspects that many of these systems share, indicating baseline functionality, as well as of missing but necessary or desirable functionality. Its goal and its use of a formal evaluation procedure distinguish this thesis from other publications that also compare document management and archive systems.
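    The formal evaluation can be pictured as a weighted additive utility model over the goal hierarchy. The sketch below uses invented criteria, weights, and ratings purely for illustration; the thesis's actual hierarchy and rating method are more detailed.

    # Weighted-score evaluation over a (flattened) goal hierarchy, illustrative only.
    WEIGHTS = {                  # priorities from the goal hierarchy, summing to 1
        "user interface": 0.2,
        "architecture": 0.3,
        "interoperability": 0.3,
        "integration": 0.2,
    }

    RATINGS = {                  # per-system ratings on a 0-10 scale (invented)
        "System A": {"user interface": 8, "architecture": 6,
                     "interoperability": 7, "integration": 5},
        "System B": {"user interface": 5, "architecture": 9,
                     "interoperability": 6, "integration": 8},
    }

    def utility(ratings):
        """Weighted sum of criterion ratings (simple additive utility model)."""
        return sum(WEIGHTS[c] * r for c, r in ratings.items())

    for system, ratings in sorted(RATINGS.items(), key=lambda kv: -utility(kv[1])):
        print(f"{system}: {utility(ratings):.2f}")

    Substitutive or complementary relationships between criteria, which the thesis explicitly considers, would require moving beyond this purely additive form.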
    Content
    Diploma thesis at the Fachbereich Informatik of the Universität Dortmund
    Imprint
    Dortmund : Universität / Fachbereich Informatik
  20. Fischer, A.: Entwurf und Implementierung eines thesaurusunterstützten Suchwerkzeugs für eine Videodatenbank (1995) 0.02
    0.017777476 = product of:
      0.13036816 = sum of:
        0.094651 = weight(_text_:informatik in 5253) [ClassicSimilarity], result of:
          0.094651 = score(doc=5253,freq=2.0), product of:
            0.104934774 = queryWeight, product of:
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.02056547 = queryNorm
            0.9019984 = fieldWeight in 5253, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              5.1024737 = idf(docFreq=730, maxDocs=44218)
              0.125 = fieldNorm(doc=5253)
        0.017858583 = weight(_text_:und in 5253) [ClassicSimilarity], result of:
          0.017858583 = score(doc=5253,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.39180204 = fieldWeight in 5253, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.125 = fieldNorm(doc=5253)
        0.017858583 = weight(_text_:und in 5253) [ClassicSimilarity], result of:
          0.017858583 = score(doc=5253,freq=2.0), product of:
            0.04558063 = queryWeight, product of:
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.02056547 = queryNorm
            0.39180204 = fieldWeight in 5253, product of:
              1.4142135 = tf(freq=2.0), with freq of:
                2.0 = termFreq=2.0
              2.216367 = idf(docFreq=13101, maxDocs=44218)
              0.125 = fieldNorm(doc=5253)
      0.13636364 = coord(3/22)
    
    Imprint
    Darmstadt : Fachhochschule, Fb Informatik

Languages

  • d 621
  • e 44
  • f 3
  • a 1
  • hu 1
  • pt 1

Types

  • el 30
  • m 21
  • r 2
  • a 1